/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "amdgpu.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dml2/dml2_internal_types.h"
#endif

#include "dce/dmub_outbox.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached).
 * It may also have one or more remote sinks (in the Multi-Stream Transport case)
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */

/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			dc->link_srv->destroy_link(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	// condition loop on link_count to allow skipping invalid indices
	for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = dc->link_srv->create_link(&link_init_params);

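		/* Register the link only when creation succeeded; link_count is
		 * not incremented for invalid connector table entries, so link
		 * indices stay dense even when connector indices are skipped.
		 */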
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = dc->link_srv->create_link(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					res = false;
				}
			}
		}
	}

	return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
{
	if (!dc || !stream || !adjust)
		return false;

	if (!dc->current_state)
		return false;

	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			if (dc->hwss.set_long_vtotal)
				dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);

			return true;
		}
	}

	return false;
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 * %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	/*
	 * Don't adjust DRR while there are bandwidth optimizations pending, to
	 * avoid conflicting with firmware updates.
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX)
		if (dc->optimized_required || dc->wm_optimized_required)
			return false;

	dc_exit_ips_for_hw_access(dc);

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;
	stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;

	if (dc->caps.max_v_total != 0 &&
		(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
		stream->adjust.timing_adjust_pending = false;
		if (adjust->allow_otg_v_count_halt)
			return set_long_vtotal(dc, stream, adjust);
		else
			return false;
	}

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);
			stream->adjust.timing_adjust_pending = false;
			return true;
		}
	}
	return false;
}

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 * timing_generator for the DC;
 * %false if the pipe context is not found or there is no
 * timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

	if (is_stop) {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
	}

	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	if (is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	else
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, uint8_t phy_id, bool is_stop)
{
	struct dmcu *dmcu;
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = phy_id;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
	if (dmub_srv)
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
	else
		return false;

	return true;
}

static void
dc_stream_forward_dmub_multiple_crc_window(struct dc_dmub_srv *dmub_srv,
		struct crc_window *window, struct otg_phy_mux *mux_mapping, bool stop)
{
	int i;
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.mul_roi_ctl.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.mul_roi_ctl.otg_id = mux_mapping->otg_output_num;

	cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;

	if (stop) {
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY;
		for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_start = window[i].rect.x;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_start = window[i].rect.y;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_end = window[i].rect.x + window[i].rect.width;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_end = window[i].rect.y + window[i].rect.height;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].enable = window[i].enable;
		}
	}

	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

bool
dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
		struct crc_window *window, uint8_t phy_id, bool stop)
{
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = phy_id;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub only. no dmcu support */
	if (dmub_srv)
		dc_stream_forward_dmub_multiple_crc_window(dmub_srv, window, &mux_mapping, stop);
	else
		return false;

	return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @crc_window: CRC window (x/y start/end) information
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 * @idx: Capture CRC on which CRC engine instance
 * @reset: Reset CRC engine before the configuration
 *
 * By default, the entire frame is used to calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *         %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous,
			     uint8_t idx, bool reset)
{
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	pipe = resource_get_otg_master_for_stream(
			&dc->current_state->res_ctx, stream);

	/* Stream not found */
	if (pipe == NULL)
		return false;

	dc_exit_ips_for_hw_access(dc);

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	param.crc_eng_inst = idx;
	param.reset = reset;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @idx: index of crc engine to get CRC from
 * @r_cr: CRC value for the red component.
 * @g_y:  CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
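 * %true otherwise.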
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	dc_exit_ips_for_hw_access(stream->ctx->dc);

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign &&
			!dc->config.unify_link_enc_assignment)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_state_release(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->link_srv)
		link_destroy_link_service(&dc->link_srv);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	kfree(dc->ctx->logger);
	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;

}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Create logger */
	dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

	if (!dc_ctx->logger) {
		kfree(dc_ctx);
		return false;
	}

	dc_ctx->logger->dev = adev_to_drm(init_params->driver);
	dc->dml.logger = dc_ctx->logger;

	dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		kfree(dc_ctx);
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	dc->link_srv = link_create_link_service();
	if (!dc->link_srv)
		return false;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (init_params->bb_from_dmub)
		dc->dml2_options.bb_from_dmub = init_params->bb_from_dmub;
	else
		dc->dml2_options.bb_from_dmub = NULL;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx__resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
	if (dc->caps.max_optimizable_video_width == 0)
		dc->caps.max_optimizable_video_width = 5120;
	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */
	dc->current_state = dc_state_create(dc, NULL);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	return true;

fail:
	return false;
}

static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
		bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

		if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
			get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
			get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
			get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR)
			get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_DCC)
			get_dcc_visual_confirm_color(dc, pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else {
			if (dc->ctx->dce_version < DCN_VERSION_2_0)
				color_space_to_black_color(
						dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
		}
		if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
				get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
				get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
				get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_VABC)
				get_vabc_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		}
	}
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_state_create_current_copy(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for different use.
			 */
			if (is_phantom) {
				if (tg->funcs->enable_crtc) {
					if (dc->hwseq->funcs.blank_pixel_data)
						dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
					tg->funcs->enable_crtc(tg);
				}
			}

			if (is_phantom)
				dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
			else
				dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (pipe->stream && pipe->plane_state) {
				if (!dc->debug.using_dml2)
					set_p_state_switch_method(dc, context, pipe);
				dc_update_visual_confirm_color(dc, context, pipe);
			}

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}

			if (dc->res_pool->funcs->prepare_mcache_programming)
				dc->res_pool->funcs->prepare_mcache_programming(dc, dangling_context);
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			/* We need to put the phantom OTG back into its default (disabled) state or we
			 * can get corruption when transitioning from one SubVP config to a different one.
			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
			 * will still get its double buffer update.
			 */
			if (is_phantom) {
				if (tg->funcs->disable_phantom_crtc)
					tg->funcs->disable_phantom_crtc(tg);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_state_release(current_ctx);
}

static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if timing_changed, disable stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		if (stream->apply_seamless_boot_optimization)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz = 0;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						dc->link_srv->set_dpms_off(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

/* Public functions */

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		dc->caps.linear_pitch_alignment = 64;
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

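/* Construction failed: tear down whatever was created and return NULL. */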
destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_connection_type(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{

	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
	dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
	dc->ctx->cp_psp = init_params->cp_psp;
}

void dc_deinit_callbacks(struct dc *dc)
{
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);

			if (!status)
				continue;

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;

		}

		/* remove any other unblanked pipes as they have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			/* remove any other pipes by checking valid plane */
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, ctx, group_index, group_size, pipe_set);
			} else
			if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}

static bool streams_changed(struct dc *dc,
			    struct dc_stream_state *streams[],
			    uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != streams[i])
			return true;
		if (!streams[i]->link->link_state_valid)
			return true;
	}

	return false;
}

bool dc_validate_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;
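
	/* Each check below compares the timing currently driven by VBIOS/GOP
	 * against the requested timing; any mismatch disqualifies seamless boot.
	 */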
	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	if (dc->debug.force_odm_combine) {
		DC_LOG_DEBUG("boot timing validation failed due to force_odm_combine\n");
		return false;
	}

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
		DC_LOG_DEBUG("boot timing validation failed due to disabled DIG\n");
		return false;
	}

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN) {
		DC_LOG_DEBUG("boot timing validation failed due to unknown DIG engine ID\n");
		return false;
	}

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count) {
		DC_LOG_DEBUG("boot timing validation failed due to timing generator instance not found\n");
		return false;
	}

	if (tg_inst >= dc->res_pool->timing_generator_count) {
		DC_LOG_DEBUG("boot timing validation failed due to invalid timing generator count\n");
		return false;
	}

	if (tg_inst != link->link_enc->preferred_engine) {
		DC_LOG_DEBUG("boot timing validation failed due to non-preferred timing generator\n");
		return false;
	}

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing) {
		DC_LOG_DEBUG("boot timing validation failed due to missing get_hw_timing callback\n");
		return false;
	}

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) {
		DC_LOG_DEBUG("boot timing validation failed due to failed get_hw_timing return\n");
		return false;
	}

	if (crtc_timing->h_total != hw_crtc_timing.h_total) {
		DC_LOG_DEBUG("boot timing validation failed due to h_total mismatch\n");
		return false;
	}

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) {
		DC_LOG_DEBUG("boot timing validation failed due to h_border_left mismatch\n");
		return false;
	}

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) {
		DC_LOG_DEBUG("boot timing validation failed due to h_addressable mismatch\n");
		return false;
	}

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) {
		DC_LOG_DEBUG("boot timing validation failed due to h_border_right mismatch\n");
		return false;
	}

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) {
		DC_LOG_DEBUG("boot timing validation failed due to h_front_porch mismatch\n");
		return false;
	}

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) {
		DC_LOG_DEBUG("boot timing validation failed due to h_sync_width mismatch\n");
		return false;
	}

	if (crtc_timing->v_total != hw_crtc_timing.v_total) {
		DC_LOG_DEBUG("boot timing validation failed due to v_total mismatch\n");
		return false;
	}

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) {
		DC_LOG_DEBUG("boot timing validation failed due to v_border_top mismatch\n");
		return false;
	}

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) {
		DC_LOG_DEBUG("boot timing validation failed due to v_addressable mismatch\n");
		return false;
	}

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) {
		DC_LOG_DEBUG("boot timing validation failed due to v_border_bottom mismatch\n");
		return false;
	}

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) {
		DC_LOG_DEBUG("boot timing validation failed due to v_front_porch mismatch\n");
		return false;
	}

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) {
		DC_LOG_DEBUG("boot timing validation failed due to v_sync_width mismatch\n");
		return false;
	}

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC) {
		DC_LOG_DEBUG("boot timing validation failed due to DSC\n");
		return false;
	}

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz = 0;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2) {
			pix_clk_100hz *= 2;
		} else if (numOdmPipes == 4) {
			pix_clk_100hz *= 4;
		} else if (se && se->funcs->get_pixels_per_cycle) {
			uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se);

			if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy) {
				DC_LOG_DEBUG("boot timing validation failed due to pixels_per_cycle\n");
				return false;
			}

			pix_clk_100hz *= pixels_per_cycle;
		}

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
1854 if (crtc_timing->pix_clk_100hz != pix_clk_100hz) { 1855 DC_LOG_DEBUG("boot timing validation failed due to pix_clk_100hz mismatch\n"); 1856 return false; 1857 } 1858 1859 if (!se || !se->funcs->dp_get_pixel_format) { 1860 DC_LOG_DEBUG("boot timing validation failed due to missing dp_get_pixel_format\n"); 1861 return false; 1862 } 1863 1864 if (!se->funcs->dp_get_pixel_format( 1865 se, 1866 &hw_crtc_timing.pixel_encoding, 1867 &hw_crtc_timing.display_color_depth)) { 1868 DC_LOG_DEBUG("boot timing validation failed due to dp_get_pixel_format failure\n"); 1869 return false; 1870 } 1871 1872 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) { 1873 DC_LOG_DEBUG("boot timing validation failed due to display_color_depth mismatch\n"); 1874 return false; 1875 } 1876 1877 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) { 1878 DC_LOG_DEBUG("boot timing validation failed due to pixel_encoding mismatch\n"); 1879 return false; 1880 } 1881 } 1882 1883 1884 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { 1885 DC_LOG_DEBUG("boot timing validation failed due to VSC SDP colorimetry\n"); 1886 return false; 1887 } 1888 1889 if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { 1890 DC_LOG_DEBUG("boot timing validation failed due to DP 128b/132b\n"); 1891 return false; 1892 } 1893 1894 if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) { 1895 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); 1896 return false; 1897 } 1898 1899 return true; 1900 } 1901 1902 static inline bool should_update_pipe_for_stream( 1903 struct dc_state *context, 1904 struct pipe_ctx *pipe_ctx, 1905 struct dc_stream_state *stream) 1906 { 1907 return (pipe_ctx->stream && pipe_ctx->stream == stream); 1908 } 1909 1910 static inline bool should_update_pipe_for_plane( 1911 struct dc_state *context, 1912 struct pipe_ctx *pipe_ctx, 1913 struct dc_plane_state *plane_state) 1914 { 1915 return (pipe_ctx->plane_state == plane_state); 1916 } 1917 1918 void dc_enable_stereo( 1919 struct dc *dc, 1920 struct dc_state *context, 1921 struct dc_stream_state *streams[], 1922 uint8_t stream_count) 1923 { 1924 int i, j; 1925 struct pipe_ctx *pipe; 1926 1927 dc_exit_ips_for_hw_access(dc); 1928 1929 for (i = 0; i < MAX_PIPES; i++) { 1930 if (context != NULL) { 1931 pipe = &context->res_ctx.pipe_ctx[i]; 1932 } else { 1933 context = dc->current_state; 1934 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1935 } 1936 1937 for (j = 0; pipe && j < stream_count; j++) { 1938 if (should_update_pipe_for_stream(context, pipe, streams[j]) && 1939 dc->hwss.setup_stereo) 1940 dc->hwss.setup_stereo(pipe, dc); 1941 } 1942 } 1943 } 1944 1945 void dc_trigger_sync(struct dc *dc, struct dc_state *context) 1946 { 1947 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { 1948 dc_exit_ips_for_hw_access(dc); 1949 1950 enable_timing_multisync(dc, context); 1951 program_timing_sync(dc, context); 1952 } 1953 } 1954 1955 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) 1956 { 1957 int i; 1958 unsigned int stream_mask = 0; 1959 1960 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1961 if (context->res_ctx.pipe_ctx[i].stream) 1962 stream_mask |= 1 << i; 1963 } 1964 1965 return stream_mask; 1966 } 1967 1968 void dc_z10_restore(const struct dc *dc) 1969 { 1970 if (dc->hwss.z10_restore) 1971 dc->hwss.z10_restore(dc); 1972 } 1973 1974 void dc_z10_save_init(struct dc *dc) 1975 { 1976 if (dc->hwss.z10_save_init) 1977 
dc->hwss.z10_save_init(dc); 1978 } 1979 1980 /* Set a pipe unlock order based on the change in DET allocation and store it in dc scratch memory. 1981 * This prevents over-allocation of DET during the unlock process, 1982 * e.g. a 2 pipe config with different streams and a max of 20 DET segments: 1983 * Before: After: 1984 * - Pipe0: 10 DET segments - Pipe0: 12 DET segments 1985 * - Pipe1: 10 DET segments - Pipe1: 8 DET segments 1986 * If Pipe0 gets updated first, 22 DET segments will be allocated, exceeding the maximum 1987 */ 1988 static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context) 1989 { 1990 unsigned int i = 0; 1991 struct pipe_ctx *pipe = NULL; 1992 struct timing_generator *tg = NULL; 1993 1994 if (!dc->config.set_pipe_unlock_order) 1995 return; 1996 1997 memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first)); 1998 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1999 pipe = &context->res_ctx.pipe_ctx[i]; 2000 tg = pipe->stream_res.tg; 2001 2002 if (!resource_is_pipe_type(pipe, OTG_MASTER) || 2003 !tg->funcs->is_tg_enabled(tg) || 2004 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 2005 continue; 2006 } 2007 2008 if (resource_calculate_det_for_stream(context, pipe) < 2009 resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) { 2010 dc->scratch.pipes_to_unlock_first[i] = true; 2011 } 2012 } 2013 } 2014 2015 /** 2016 * dc_commit_state_no_check - Apply context to the hardware 2017 * 2018 * @dc: DC object with the current status to be updated 2019 * @context: New state that will become the current status at the end of this function 2020 * 2021 * Applies the given context to the hardware and copies it into the current context. 2022 * It's up to the user to release the src context afterwards.
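 *
 * A minimal caller-side sketch (illustrative only; in this file the caller is
 * dc_commit_streams(), which additionally handles validation and ODM exit):
 *
 *	context = dc_state_create_current_copy(dc);
 *	// ...populate and validate context...
 *	res = dc_commit_state_no_check(dc, context);
 *	dc_state_release(context);	// drop the caller's reference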
2023 * 2024 * Return: an enum dc_status result code for the operation 2025 */ 2026 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) 2027 { 2028 struct dc_bios *dcb = dc->ctx->dc_bios; 2029 enum dc_status result = DC_ERROR_UNEXPECTED; 2030 struct pipe_ctx *pipe; 2031 int i, k, l; 2032 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; 2033 struct dc_state *old_state; 2034 bool subvp_prev_use = false; 2035 2036 dc_z10_restore(dc); 2037 dc_allow_idle_optimizations(dc, false); 2038 2039 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2040 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2041 2042 /* Check old context for SubVP */ 2043 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 2044 if (subvp_prev_use) 2045 break; 2046 } 2047 2048 for (i = 0; i < context->stream_count; i++) 2049 dc_streams[i] = context->streams[i]; 2050 2051 if (!dcb->funcs->is_accelerated_mode(dcb)) { 2052 disable_vbios_mode_if_required(dc, context); 2053 dc->hwss.enable_accelerated_mode(dc, context); 2054 } 2055 2056 if (context->stream_count > get_seamless_boot_stream_count(context) || 2057 context->stream_count == 0) 2058 dc->hwss.prepare_bandwidth(dc, context); 2059 2060 /* When SubVP is active, all HW programming must be done while 2061 * SubVP lock is acquired 2062 */ 2063 if (dc->hwss.subvp_pipe_control_lock) 2064 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); 2065 if (dc->hwss.fams2_global_control_lock) 2066 dc->hwss.fams2_global_control_lock(dc, context, true); 2067 2068 if (dc->hwss.update_dsc_pg) 2069 dc->hwss.update_dsc_pg(dc, context, false); 2070 2071 disable_dangling_plane(dc, context); 2072 /* re-program planes for existing stream, in case we need to 2073 * free up plane resource for later use 2074 */ 2075 if (dc->hwss.apply_ctx_for_surface) { 2076 for (i = 0; i < context->stream_count; i++) { 2077 if (context->streams[i]->mode_changed) 2078 continue; 2079 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 2080 dc->hwss.apply_ctx_for_surface( 2081 dc, context->streams[i], 2082 context->stream_status[i].plane_count, 2083 context); /* use new pipe config in new context */ 2084 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 2085 dc->hwss.post_unlock_program_front_end(dc, context); 2086 } 2087 } 2088 2089 /* Program hardware */ 2090 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2091 pipe = &context->res_ctx.pipe_ctx[i]; 2092 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); 2093 } 2094 2095 result = dc->hwss.apply_ctx_to_hw(dc, context); 2096 2097 if (result != DC_OK) { 2098 /* Application of dc_state to hardware stopped. 
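		 * Roll the link encoder assignment tracking back to steady-state
		 * mode (it is presumably left in a transient mode while the new
		 * assignments were being applied) so later queries keep reading
		 * the unchanged current state, then hand the error back.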
*/ 2099 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; 2100 return result; 2101 } 2102 2103 dc_trigger_sync(dc, context); 2104 2105 /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ 2106 for (i = 0; i < context->stream_count; i++) { 2107 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed; 2108 2109 context->streams[i]->update_flags.raw = 0xFFFFFFFF; 2110 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; 2111 } 2112 2113 determine_pipe_unlock_order(dc, context); 2114 /* Program all planes within new context*/ 2115 if (dc->res_pool->funcs->prepare_mcache_programming) 2116 dc->res_pool->funcs->prepare_mcache_programming(dc, context); 2117 if (dc->hwss.program_front_end_for_ctx) { 2118 dc->hwss.interdependent_update_lock(dc, context, true); 2119 dc->hwss.program_front_end_for_ctx(dc, context); 2120 dc->hwss.interdependent_update_lock(dc, context, false); 2121 dc->hwss.post_unlock_program_front_end(dc, context); 2122 } 2123 2124 if (dc->hwss.commit_subvp_config) 2125 dc->hwss.commit_subvp_config(dc, context); 2126 if (dc->hwss.subvp_pipe_control_lock) 2127 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); 2128 if (dc->hwss.fams2_global_control_lock) 2129 dc->hwss.fams2_global_control_lock(dc, context, false); 2130 2131 for (i = 0; i < context->stream_count; i++) { 2132 const struct dc_link *link = context->streams[i]->link; 2133 2134 if (!context->streams[i]->mode_changed) 2135 continue; 2136 2137 if (dc->hwss.apply_ctx_for_surface) { 2138 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 2139 dc->hwss.apply_ctx_for_surface( 2140 dc, context->streams[i], 2141 context->stream_status[i].plane_count, 2142 context); 2143 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 2144 dc->hwss.post_unlock_program_front_end(dc, context); 2145 } 2146 2147 /* 2148 * enable stereo 2149 * TODO rework dc_enable_stereo call to work with validation sets? 2150 */ 2151 for (k = 0; k < MAX_PIPES; k++) { 2152 pipe = &context->res_ctx.pipe_ctx[k]; 2153 2154 for (l = 0 ; pipe && l < context->stream_count; l++) { 2155 if (context->streams[l] && 2156 context->streams[l] == pipe->stream && 2157 dc->hwss.setup_stereo) 2158 dc->hwss.setup_stereo(pipe, dc); 2159 } 2160 } 2161 2162 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", 2163 context->streams[i]->timing.h_addressable, 2164 context->streams[i]->timing.v_addressable, 2165 context->streams[i]->timing.h_total, 2166 context->streams[i]->timing.v_total, 2167 context->streams[i]->timing.pix_clk_100hz / 10); 2168 } 2169 2170 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 2171 2172 if (get_seamless_boot_stream_count(context) == 0 || 2173 context->stream_count == 0) { 2174 /* Must wait for no flips to be pending before doing optimize bw */ 2175 hwss_wait_for_no_pipes_pending(dc, context); 2176 /* 2177 * optimized dispclk depends on ODM setup. Need to wait for ODM 2178 * update pending complete before optimizing bandwidth. 
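	 * (Presumably, dropping dispclk while an ODM reconfiguration is still
	 * pending could briefly underclock the pipe layout that is actually
	 * active, hence the wait below happens before optimize_bandwidth.)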
2179 */ 2180 hwss_wait_for_odm_update_pending_complete(dc, context); 2181 /* pplib is notified if disp_num changed */ 2182 dc->hwss.optimize_bandwidth(dc, context); 2183 /* Need to do otg sync again as otg could be out of sync due to otg 2184 * workaround applied during clock update 2185 */ 2186 dc_trigger_sync(dc, context); 2187 } 2188 2189 if (dc->hwss.update_dsc_pg) 2190 dc->hwss.update_dsc_pg(dc, context, true); 2191 2192 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 2193 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2194 else 2195 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2196 2197 context->stream_mask = get_stream_mask(dc, context); 2198 2199 if (context->stream_mask != dc->current_state->stream_mask) 2200 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); 2201 2202 for (i = 0; i < context->stream_count; i++) 2203 context->streams[i]->mode_changed = false; 2204 2205 /* Clear update flags that were set earlier to avoid redundant programming */ 2206 for (i = 0; i < context->stream_count; i++) { 2207 context->streams[i]->update_flags.raw = 0x0; 2208 } 2209 2210 old_state = dc->current_state; 2211 dc->current_state = context; 2212 2213 dc_state_release(old_state); 2214 2215 dc_state_retain(dc->current_state); 2216 2217 return result; 2218 } 2219 2220 static bool commit_minimal_transition_state(struct dc *dc, 2221 struct dc_state *transition_base_context); 2222 2223 /** 2224 * dc_commit_streams - Commit current stream state 2225 * 2226 * @dc: DC object with the commit state to be configured in the hardware 2227 * @params: Parameters for the commit, including the streams to be committed 2228 * 2229 * Function responsible for commit streams change to the hardware. 2230 * 2231 * Return: 2232 * Return DC_OK if everything work as expected, otherwise, return a dc_status 2233 * code. 2234 */ 2235 enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params) 2236 { 2237 int i, j; 2238 struct dc_state *context; 2239 enum dc_status res = DC_OK; 2240 struct dc_validation_set set[MAX_STREAMS] = {0}; 2241 struct pipe_ctx *pipe; 2242 bool handle_exit_odm2to1 = false; 2243 2244 if (!params) 2245 return DC_ERROR_UNEXPECTED; 2246 2247 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) 2248 return res; 2249 2250 if (!streams_changed(dc, params->streams, params->stream_count) && 2251 dc->current_state->power_source == params->power_source) 2252 return res; 2253 2254 dc_exit_ips_for_hw_access(dc); 2255 2256 DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count); 2257 2258 for (i = 0; i < params->stream_count; i++) { 2259 struct dc_stream_state *stream = params->streams[i]; 2260 struct dc_stream_status *status = dc_stream_get_status(stream); 2261 2262 /* revalidate streams */ 2263 res = dc_validate_stream(dc, stream); 2264 if (res != DC_OK) 2265 return res; 2266 2267 dc_stream_log(dc, stream); 2268 2269 set[i].stream = stream; 2270 2271 if (status) { 2272 set[i].plane_count = status->plane_count; 2273 for (j = 0; j < status->plane_count; j++) 2274 set[i].plane_states[j] = status->plane_states[j]; 2275 } 2276 } 2277 2278 /* ODM Combine 2:1 power optimization is only applied for single stream 2279 * scenario, it uses extra pipes than needed to reduce power consumption 2280 * We need to switch off this feature to make room for new streams. 
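	 * Example (illustrative): a single stream driven with ODM combine 2:1
	 * occupies two pipes; before a second stream can be committed, the
	 * minimal transition commit below collapses it back to one pipe so the
	 * freed pipe can serve the new stream.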
2281 */ 2282 if (params->stream_count > dc->current_state->stream_count && 2283 dc->current_state->stream_count == 1) { 2284 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2285 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2286 if (pipe->next_odm_pipe) 2287 handle_exit_odm2to1 = true; 2288 } 2289 } 2290 2291 if (handle_exit_odm2to1) 2292 res = commit_minimal_transition_state(dc, dc->current_state); 2293 2294 context = dc_state_create_current_copy(dc); 2295 if (!context) 2296 goto context_alloc_fail; 2297 2298 context->power_source = params->power_source; 2299 2300 res = dc_validate_with_context(dc, set, params->stream_count, context, false); 2301 2302 /* 2303 * Only update link encoder to stream assignment after bandwidth validation passed. 2304 */ 2305 if (res == DC_OK && dc->res_pool->funcs->link_encs_assign && !dc->config.unify_link_enc_assignment) 2306 dc->res_pool->funcs->link_encs_assign( 2307 dc, context, context->streams, context->stream_count); 2308 2309 if (res != DC_OK) { 2310 BREAK_TO_DEBUGGER(); 2311 goto fail; 2312 } 2313 2314 res = dc_commit_state_no_check(dc, context); 2315 2316 for (i = 0; i < params->stream_count; i++) { 2317 for (j = 0; j < context->stream_count; j++) { 2318 if (params->streams[i]->stream_id == context->streams[j]->stream_id) 2319 params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 2320 2321 if (dc_is_embedded_signal(params->streams[i]->signal)) { 2322 struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]); 2323 2324 if (!status) 2325 continue; 2326 2327 if (dc->hwss.is_abm_supported) 2328 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]); 2329 else 2330 status->is_abm_supported = true; 2331 } 2332 } 2333 } 2334 2335 fail: 2336 dc_state_release(context); 2337 2338 context_alloc_fail: 2339 2340 DC_LOG_DC("%s Finished.\n", __func__); 2341 2342 return res; 2343 } 2344 2345 bool dc_acquire_release_mpc_3dlut( 2346 struct dc *dc, bool acquire, 2347 struct dc_stream_state *stream, 2348 struct dc_3dlut **lut, 2349 struct dc_transfer_func **shaper) 2350 { 2351 int pipe_idx; 2352 bool ret = false; 2353 bool found_pipe_idx = false; 2354 const struct resource_pool *pool = dc->res_pool; 2355 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2356 int mpcc_id = 0; 2357 2358 if (pool && res_ctx) { 2359 if (acquire) { 2360 /*find pipe idx for the given stream*/ 2361 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2362 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2363 found_pipe_idx = true; 2364 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2365 break; 2366 } 2367 } 2368 } else 2369 found_pipe_idx = true;/*for release pipe_idx is not required*/ 2370 2371 if (found_pipe_idx) { 2372 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2373 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2374 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2375 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2376 } 2377 } 2378 return ret; 2379 } 2380 2381 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2382 { 2383 int i; 2384 struct pipe_ctx *pipe; 2385 2386 for (i = 0; i < MAX_PIPES; i++) { 2387 pipe = &context->res_ctx.pipe_ctx[i]; 2388 2389 // Don't check flip pending on phantom pipes 2390 if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)) 2391 continue; 2392 2393 /* Must set to false to start with, due 
to OR in update function */ 2394 pipe->plane_state->status.is_flip_pending = false; 2395 dc->hwss.update_pending_status(pipe); 2396 if (pipe->plane_state->status.is_flip_pending) 2397 return true; 2398 } 2399 return false; 2400 } 2401 2402 /* Perform updates here which need to be deferred until next vupdate 2403 * 2404 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered 2405 * but forcing lut memory to shutdown state is immediate. This causes 2406 * single frame corruption as lut gets disabled mid-frame unless shutdown 2407 * is deferred until after entering bypass. 2408 */ 2409 static void process_deferred_updates(struct dc *dc) 2410 { 2411 int i = 0; 2412 2413 if (dc->debug.enable_mem_low_power.bits.cm) { 2414 ASSERT(dc->dcn_ip->max_num_dpp); 2415 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2416 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2417 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2418 } 2419 } 2420 2421 void dc_post_update_surfaces_to_stream(struct dc *dc) 2422 { 2423 int i; 2424 struct dc_state *context = dc->current_state; 2425 2426 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2427 return; 2428 2429 post_surface_trace(dc); 2430 2431 /* 2432 * Only relevant for DCN behavior where we can guarantee the optimization 2433 * is safe to apply - retain the legacy behavior for DCE. 2434 */ 2435 2436 if (dc->ctx->dce_version < DCE_VERSION_MAX) 2437 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2438 else { 2439 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2440 2441 if (is_flip_pending_in_pipes(dc, context)) 2442 return; 2443 2444 for (i = 0; i < dc->res_pool->pipe_count; i++) 2445 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2446 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2447 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2448 dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]); 2449 } 2450 2451 process_deferred_updates(dc); 2452 2453 dc->hwss.optimize_bandwidth(dc, context); 2454 2455 if (dc->hwss.update_dsc_pg) 2456 dc->hwss.update_dsc_pg(dc, context, true); 2457 } 2458 2459 dc->optimized_required = false; 2460 dc->wm_optimized_required = false; 2461 } 2462 2463 bool dc_set_generic_gpio_for_stereo(bool enable, 2464 struct gpio_service *gpio_service) 2465 { 2466 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2467 struct gpio_pin_info pin_info; 2468 struct gpio *generic; 2469 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2470 GFP_KERNEL); 2471 2472 if (!config) 2473 return false; 2474 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2475 2476 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2477 kfree(config); 2478 return false; 2479 } else { 2480 generic = dal_gpio_service_create_generic_mux( 2481 gpio_service, 2482 pin_info.offset, 2483 pin_info.mask); 2484 } 2485 2486 if (!generic) { 2487 kfree(config); 2488 return false; 2489 } 2490 2491 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2492 2493 config->enable_output_from_mux = enable; 2494 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2495 2496 if (gpio_result == GPIO_RESULT_OK) 2497 gpio_result = dal_mux_setup_config(generic, config); 2498 2499 if (gpio_result == GPIO_RESULT_OK) { 2500 dal_gpio_close(generic); 2501 dal_gpio_destroy_generic_mux(&generic); 2502 kfree(config); 2503 return true; 2504 } else { 2505 dal_gpio_close(generic); 2506 
dal_gpio_destroy_generic_mux(&generic); 2507 kfree(config); 2508 return false; 2509 } 2510 } 2511 2512 static bool is_surface_in_context( 2513 const struct dc_state *context, 2514 const struct dc_plane_state *plane_state) 2515 { 2516 int j; 2517 2518 for (j = 0; j < MAX_PIPES; j++) { 2519 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2520 2521 if (plane_state == pipe_ctx->plane_state) { 2522 return true; 2523 } 2524 } 2525 2526 return false; 2527 } 2528 2529 static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u) 2530 { 2531 union surface_update_flags *update_flags = &u->surface->update_flags; 2532 enum surface_update_type update_type = UPDATE_TYPE_FAST; 2533 2534 if (!u->plane_info) 2535 return UPDATE_TYPE_FAST; 2536 2537 if (u->plane_info->color_space != u->surface->color_space) { 2538 update_flags->bits.color_space_change = 1; 2539 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2540 } 2541 2542 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { 2543 update_flags->bits.horizontal_mirror_change = 1; 2544 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2545 } 2546 2547 if (u->plane_info->rotation != u->surface->rotation) { 2548 update_flags->bits.rotation_change = 1; 2549 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2550 } 2551 2552 if (u->plane_info->format != u->surface->format) { 2553 update_flags->bits.pixel_format_change = 1; 2554 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2555 } 2556 2557 if (u->plane_info->stereo_format != u->surface->stereo_format) { 2558 update_flags->bits.stereo_format_change = 1; 2559 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2560 } 2561 2562 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { 2563 update_flags->bits.per_pixel_alpha_change = 1; 2564 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2565 } 2566 2567 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { 2568 update_flags->bits.global_alpha_change = 1; 2569 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2570 } 2571 2572 if (u->plane_info->dcc.enable != u->surface->dcc.enable 2573 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk 2574 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { 2575 /* During DCC on/off, stutter period is calculated before 2576 * DCC has fully transitioned. This results in incorrect 2577 * stutter period calculation. Triggering a full update will 2578 * recalculate stutter period. 
2579 */ 2580 update_flags->bits.dcc_change = 1; 2581 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2582 } 2583 2584 if (resource_pixel_format_to_bpp(u->plane_info->format) != 2585 resource_pixel_format_to_bpp(u->surface->format)) { 2586 /* different bytes per element will require full bandwidth 2587 * and DML calculation 2588 */ 2589 update_flags->bits.bpp_change = 1; 2590 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2591 } 2592 2593 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch 2594 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { 2595 update_flags->bits.plane_size_change = 1; 2596 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2597 } 2598 2599 2600 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 2601 sizeof(struct dc_tiling_info)) != 0) { 2602 update_flags->bits.swizzle_change = 1; 2603 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2604 2605 /* todo: below are HW dependent, we should add a hook to 2606 * DCE/N resource and validated there. 2607 */ 2608 if (!dc->debug.skip_full_updated_if_possible) { 2609 /* swizzled mode requires RQ to be setup properly, 2610 * thus need to run DML to calculate RQ settings 2611 */ 2612 update_flags->bits.bandwidth_change = 1; 2613 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2614 } 2615 } 2616 2617 /* This should be UPDATE_TYPE_FAST if nothing has changed. */ 2618 return update_type; 2619 } 2620 2621 static enum surface_update_type get_scaling_info_update_type( 2622 const struct dc *dc, 2623 const struct dc_surface_update *u) 2624 { 2625 union surface_update_flags *update_flags = &u->surface->update_flags; 2626 2627 if (!u->scaling_info) 2628 return UPDATE_TYPE_FAST; 2629 2630 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2631 || u->scaling_info->src_rect.height != u->surface->src_rect.height 2632 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2633 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2634 || u->scaling_info->clip_rect.width != u->surface->clip_rect.width 2635 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height 2636 || u->scaling_info->scaling_quality.integer_scaling != 2637 u->surface->scaling_quality.integer_scaling) { 2638 update_flags->bits.scaling_change = 1; 2639 2640 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2641 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2642 /* Making src rect bigger requires a bandwidth change */ 2643 update_flags->bits.clock_change = 1; 2644 2645 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2646 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2647 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2648 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2649 /* Making dst rect smaller requires a bandwidth change */ 2650 update_flags->bits.bandwidth_change = 1; 2651 2652 if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && 2653 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || 2654 u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) 2655 /* Changing clip size of a large surface may result in MPC slice count change */ 2656 update_flags->bits.bandwidth_change = 1; 2657 } 2658 2659 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2660 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2661 || u->scaling_info->clip_rect.x != 
u->surface->clip_rect.x 2662 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2663 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2664 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) 2665 update_flags->bits.position_change = 1; 2666 2667 /* process every update flag before returning */ 2668 if (update_flags->bits.clock_change 2669 || update_flags->bits.bandwidth_change 2670 || update_flags->bits.scaling_change) 2671 return UPDATE_TYPE_FULL; 2672 2673 if (update_flags->bits.position_change) 2674 return UPDATE_TYPE_MED; 2675 2676 return UPDATE_TYPE_FAST; 2677 } 2678 2679 static enum surface_update_type det_surface_update(const struct dc *dc, 2680 const struct dc_surface_update *u) 2681 { 2682 const struct dc_state *context = dc->current_state; 2683 enum surface_update_type type; 2684 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2685 union surface_update_flags *update_flags = &u->surface->update_flags; 2686 2687 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { 2688 update_flags->raw = 0xFFFFFFFF; 2689 return UPDATE_TYPE_FULL; 2690 } 2691 2692 update_flags->raw = 0; // Reset all flags 2693 2694 type = get_plane_info_update_type(dc, u); 2695 elevate_update_type(&overall_type, type); 2696 2697 type = get_scaling_info_update_type(dc, u); 2698 elevate_update_type(&overall_type, type); 2699 2700 if (u->flip_addr) { 2701 update_flags->bits.addr_update = 1; 2702 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2703 update_flags->bits.tmz_changed = 1; 2704 elevate_update_type(&overall_type, UPDATE_TYPE_FULL); 2705 } 2706 } 2707 if (u->in_transfer_func) 2708 update_flags->bits.in_transfer_func_change = 1; 2709 2710 if (u->input_csc_color_matrix) 2711 update_flags->bits.input_csc_change = 1; 2712 2713 if (u->coeff_reduction_factor) 2714 update_flags->bits.coeff_reduction_change = 1; 2715 2716 if (u->gamut_remap_matrix) 2717 update_flags->bits.gamut_remap_change = 1; 2718 2719 if (u->blend_tf) 2720 update_flags->bits.gamma_change = 1; 2721 2722 if (u->gamma) { 2723 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; 2724 2725 if (u->plane_info) 2726 format = u->plane_info->format; 2727 else 2728 format = u->surface->format; 2729 2730 if (dce_use_lut(format)) 2731 update_flags->bits.gamma_change = 1; 2732 } 2733 2734 if (u->lut3d_func || u->func_shaper) 2735 update_flags->bits.lut_3d = 1; 2736 2737 if (u->hdr_mult.value) 2738 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2739 update_flags->bits.hdr_mult = 1; 2740 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2741 } 2742 2743 if (u->sdr_white_level_nits) 2744 if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) { 2745 update_flags->bits.sdr_white_level_nits = 1; 2746 elevate_update_type(&overall_type, UPDATE_TYPE_FULL); 2747 } 2748 2749 if (u->cm2_params) { 2750 if ((u->cm2_params->component_settings.shaper_3dlut_setting 2751 != u->surface->mcm_shaper_3dlut_setting) 2752 || (u->cm2_params->component_settings.lut1d_enable 2753 != u->surface->mcm_lut1d_enable)) 2754 update_flags->bits.mcm_transfer_function_enable_change = 1; 2755 if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src 2756 != u->surface->mcm_luts.lut3d_data.lut3d_src) 2757 update_flags->bits.mcm_transfer_function_enable_change = 1; 2758 } 2759 if (update_flags->bits.in_transfer_func_change) { 2760 type = UPDATE_TYPE_MED; 2761 elevate_update_type(&overall_type, type); 2762 } 2763 2764 if (update_flags->bits.lut_3d && 2765 
u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { 2766 type = UPDATE_TYPE_FULL; 2767 elevate_update_type(&overall_type, type); 2768 } 2769 if (update_flags->bits.mcm_transfer_function_enable_change) { 2770 type = UPDATE_TYPE_FULL; 2771 elevate_update_type(&overall_type, type); 2772 } 2773 2774 if (dc->debug.enable_legacy_fast_update && 2775 (update_flags->bits.gamma_change || 2776 update_flags->bits.gamut_remap_change || 2777 update_flags->bits.input_csc_change || 2778 update_flags->bits.coeff_reduction_change)) { 2779 type = UPDATE_TYPE_FULL; 2780 elevate_update_type(&overall_type, type); 2781 } 2782 return overall_type; 2783 } 2784 2785 /* May need to flip the desktop plane in cases where MPO plane receives a flip but desktop plane doesn't 2786 * while both planes are flip_immediate 2787 */ 2788 static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count) 2789 { 2790 bool has_flip_immediate_plane = false; 2791 int i; 2792 2793 for (i = 0; i < surface_count; i++) { 2794 if (updates[i].surface->flip_immediate) { 2795 has_flip_immediate_plane = true; 2796 break; 2797 } 2798 } 2799 2800 if (has_flip_immediate_plane && surface_count > 1) { 2801 for (i = 0; i < surface_count; i++) { 2802 if (updates[i].surface->flip_immediate) 2803 updates[i].surface->update_flags.bits.addr_update = 1; 2804 } 2805 } 2806 } 2807 2808 static enum surface_update_type check_update_surfaces_for_stream( 2809 struct dc *dc, 2810 struct dc_surface_update *updates, 2811 int surface_count, 2812 struct dc_stream_update *stream_update, 2813 const struct dc_stream_status *stream_status) 2814 { 2815 int i; 2816 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2817 2818 if (dc->idle_optimizations_allowed) 2819 overall_type = UPDATE_TYPE_FULL; 2820 2821 if (stream_status == NULL || stream_status->plane_count != surface_count) 2822 overall_type = UPDATE_TYPE_FULL; 2823 2824 if (stream_update && stream_update->pending_test_pattern) { 2825 overall_type = UPDATE_TYPE_FULL; 2826 } 2827 2828 if (stream_update && stream_update->hw_cursor_req) { 2829 overall_type = UPDATE_TYPE_FULL; 2830 } 2831 2832 /* some stream updates require passive update */ 2833 if (stream_update) { 2834 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2835 2836 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2837 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2838 stream_update->integer_scaling_update) 2839 su_flags->bits.scaling = 1; 2840 2841 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) 2842 su_flags->bits.out_tf = 1; 2843 2844 if (stream_update->abm_level) 2845 su_flags->bits.abm_level = 1; 2846 2847 if (stream_update->dpms_off) 2848 su_flags->bits.dpms_off = 1; 2849 2850 if (stream_update->gamut_remap) 2851 su_flags->bits.gamut_remap = 1; 2852 2853 if (stream_update->wb_update) 2854 su_flags->bits.wb_update = 1; 2855 2856 if (stream_update->dsc_config) 2857 su_flags->bits.dsc_changed = 1; 2858 2859 if (stream_update->mst_bw_update) 2860 su_flags->bits.mst_bw = 1; 2861 2862 if (stream_update->stream->freesync_on_desktop && 2863 (stream_update->vrr_infopacket || stream_update->allow_freesync || 2864 stream_update->vrr_active_variable || stream_update->vrr_active_fixed)) 2865 su_flags->bits.fams_changed = 1; 2866 2867 if (stream_update->scaler_sharpener_update) 2868 su_flags->bits.scaler_sharpener = 1; 2869 2870 if (stream_update->sharpening_required) 2871 
su_flags->bits.sharpening_required = 1; 2872 2873 if (stream_update->output_color_space) 2874 su_flags->bits.out_csc = 1; 2875 2876 if (su_flags->raw != 0) 2877 overall_type = UPDATE_TYPE_FULL; 2878 2879 if (stream_update->output_csc_transform) 2880 su_flags->bits.out_csc = 1; 2881 2882 /* Output transfer function changes do not require bandwidth recalculation, 2883 * so don't trigger a full update 2884 */ 2885 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) 2886 su_flags->bits.out_tf = 1; 2887 } 2888 2889 for (i = 0 ; i < surface_count; i++) { 2890 enum surface_update_type type = 2891 det_surface_update(dc, &updates[i]); 2892 2893 elevate_update_type(&overall_type, type); 2894 } 2895 2896 return overall_type; 2897 } 2898 2899 /* 2900 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 2901 * 2902 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 2903 */ 2904 enum surface_update_type dc_check_update_surfaces_for_stream( 2905 struct dc *dc, 2906 struct dc_surface_update *updates, 2907 int surface_count, 2908 struct dc_stream_update *stream_update, 2909 const struct dc_stream_status *stream_status) 2910 { 2911 int i; 2912 enum surface_update_type type; 2913 2914 if (stream_update) 2915 stream_update->stream->update_flags.raw = 0; 2916 for (i = 0; i < surface_count; i++) 2917 updates[i].surface->update_flags.raw = 0; 2918 2919 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 2920 if (type == UPDATE_TYPE_FULL) { 2921 if (stream_update) { 2922 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 2923 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 2924 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 2925 } 2926 for (i = 0; i < surface_count; i++) 2927 updates[i].surface->update_flags.raw = 0xFFFFFFFF; 2928 } 2929 2930 if (type == UPDATE_TYPE_FAST) { 2931 // If there's an available clock comparator, we use that. 2932 if (dc->clk_mgr->funcs->are_clock_states_equal) { 2933 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) 2934 dc->optimized_required = true; 2935 // Else we fallback to mem compare. 
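		// Note: the memcmp below spans struct dc_clocks only up to (not
		// including) prev_p_state_change_support, so members at or after
		// that offset never mark optimization as required here.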
2936 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { 2937 dc->optimized_required = true; 2938 } 2939 2940 dc->optimized_required |= dc->wm_optimized_required; 2941 } 2942 2943 return type; 2944 } 2945 2946 static struct dc_stream_status *stream_get_status( 2947 struct dc_state *ctx, 2948 struct dc_stream_state *stream) 2949 { 2950 uint8_t i; 2951 2952 for (i = 0; i < ctx->stream_count; i++) { 2953 if (stream == ctx->streams[i]) { 2954 return &ctx->stream_status[i]; 2955 } 2956 } 2957 2958 return NULL; 2959 } 2960 2961 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 2962 2963 static void copy_surface_update_to_plane( 2964 struct dc_plane_state *surface, 2965 struct dc_surface_update *srf_update) 2966 { 2967 if (srf_update->flip_addr) { 2968 surface->address = srf_update->flip_addr->address; 2969 surface->flip_immediate = 2970 srf_update->flip_addr->flip_immediate; 2971 surface->time.time_elapsed_in_us[surface->time.index] = 2972 srf_update->flip_addr->flip_timestamp_in_us - 2973 surface->time.prev_update_time_in_us; 2974 surface->time.prev_update_time_in_us = 2975 srf_update->flip_addr->flip_timestamp_in_us; 2976 surface->time.index++; 2977 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 2978 surface->time.index = 0; 2979 2980 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 2981 } 2982 2983 if (srf_update->scaling_info) { 2984 surface->scaling_quality = 2985 srf_update->scaling_info->scaling_quality; 2986 surface->dst_rect = 2987 srf_update->scaling_info->dst_rect; 2988 surface->src_rect = 2989 srf_update->scaling_info->src_rect; 2990 surface->clip_rect = 2991 srf_update->scaling_info->clip_rect; 2992 } 2993 2994 if (srf_update->plane_info) { 2995 surface->color_space = 2996 srf_update->plane_info->color_space; 2997 surface->format = 2998 srf_update->plane_info->format; 2999 surface->plane_size = 3000 srf_update->plane_info->plane_size; 3001 surface->rotation = 3002 srf_update->plane_info->rotation; 3003 surface->horizontal_mirror = 3004 srf_update->plane_info->horizontal_mirror; 3005 surface->stereo_format = 3006 srf_update->plane_info->stereo_format; 3007 surface->tiling_info = 3008 srf_update->plane_info->tiling_info; 3009 surface->visible = 3010 srf_update->plane_info->visible; 3011 surface->per_pixel_alpha = 3012 srf_update->plane_info->per_pixel_alpha; 3013 surface->global_alpha = 3014 srf_update->plane_info->global_alpha; 3015 surface->global_alpha_value = 3016 srf_update->plane_info->global_alpha_value; 3017 surface->dcc = 3018 srf_update->plane_info->dcc; 3019 surface->layer_index = 3020 srf_update->plane_info->layer_index; 3021 } 3022 3023 if (srf_update->gamma) { 3024 memcpy(&surface->gamma_correction.entries, 3025 &srf_update->gamma->entries, 3026 sizeof(struct dc_gamma_entries)); 3027 surface->gamma_correction.is_identity = 3028 srf_update->gamma->is_identity; 3029 surface->gamma_correction.num_entries = 3030 srf_update->gamma->num_entries; 3031 surface->gamma_correction.type = 3032 srf_update->gamma->type; 3033 } 3034 3035 if (srf_update->in_transfer_func) { 3036 surface->in_transfer_func.sdr_ref_white_level = 3037 srf_update->in_transfer_func->sdr_ref_white_level; 3038 surface->in_transfer_func.tf = 3039 srf_update->in_transfer_func->tf; 3040 surface->in_transfer_func.type = 3041 srf_update->in_transfer_func->type; 3042 memcpy(&surface->in_transfer_func.tf_pts, 3043 &srf_update->in_transfer_func->tf_pts, 3044 
sizeof(struct dc_transfer_func_distributed_points)); 3045 } 3046 3047 if (srf_update->cm2_params) { 3048 surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; 3049 surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; 3050 surface->mcm_luts = srf_update->cm2_params->cm2_luts; 3051 } 3052 3053 if (srf_update->func_shaper) { 3054 memcpy(&surface->in_shaper_func, srf_update->func_shaper, 3055 sizeof(surface->in_shaper_func)); 3056 3057 if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER) 3058 surface->mcm_luts.shaper = &surface->in_shaper_func; 3059 } 3060 3061 if (srf_update->lut3d_func) 3062 memcpy(&surface->lut3d_func, srf_update->lut3d_func, 3063 sizeof(surface->lut3d_func)); 3064 3065 if (srf_update->hdr_mult.value) 3066 surface->hdr_mult = 3067 srf_update->hdr_mult; 3068 3069 if (srf_update->sdr_white_level_nits) 3070 surface->sdr_white_level_nits = 3071 srf_update->sdr_white_level_nits; 3072 3073 if (srf_update->blend_tf) { 3074 memcpy(&surface->blend_tf, srf_update->blend_tf, 3075 sizeof(surface->blend_tf)); 3076 3077 if (surface->mcm_lut1d_enable) 3078 surface->mcm_luts.lut1d_func = &surface->blend_tf; 3079 } 3080 3081 if (srf_update->cm2_params || srf_update->blend_tf) 3082 surface->lut_bank_a = !surface->lut_bank_a; 3083 3084 if (srf_update->input_csc_color_matrix) 3085 surface->input_csc_color_matrix = 3086 *srf_update->input_csc_color_matrix; 3087 3088 if (srf_update->coeff_reduction_factor) 3089 surface->coeff_reduction_factor = 3090 *srf_update->coeff_reduction_factor; 3091 3092 if (srf_update->gamut_remap_matrix) 3093 surface->gamut_remap_matrix = 3094 *srf_update->gamut_remap_matrix; 3095 3096 if (srf_update->cursor_csc_color_matrix) 3097 surface->cursor_csc_color_matrix = 3098 *srf_update->cursor_csc_color_matrix; 3099 3100 if (srf_update->bias_and_scale.bias_and_scale_valid) 3101 surface->bias_and_scale = 3102 srf_update->bias_and_scale; 3103 } 3104 3105 static void copy_stream_update_to_stream(struct dc *dc, 3106 struct dc_state *context, 3107 struct dc_stream_state *stream, 3108 struct dc_stream_update *update) 3109 { 3110 struct dc_context *dc_ctx = dc->ctx; 3111 3112 if (update == NULL || stream == NULL) 3113 return; 3114 3115 if (update->src.height && update->src.width) 3116 stream->src = update->src; 3117 3118 if (update->dst.height && update->dst.width) 3119 stream->dst = update->dst; 3120 3121 if (update->out_transfer_func) { 3122 stream->out_transfer_func.sdr_ref_white_level = 3123 update->out_transfer_func->sdr_ref_white_level; 3124 stream->out_transfer_func.tf = update->out_transfer_func->tf; 3125 stream->out_transfer_func.type = 3126 update->out_transfer_func->type; 3127 memcpy(&stream->out_transfer_func.tf_pts, 3128 &update->out_transfer_func->tf_pts, 3129 sizeof(struct dc_transfer_func_distributed_points)); 3130 } 3131 3132 if (update->hdr_static_metadata) 3133 stream->hdr_static_metadata = *update->hdr_static_metadata; 3134 3135 if (update->abm_level) 3136 stream->abm_level = *update->abm_level; 3137 3138 if (update->periodic_interrupt) 3139 stream->periodic_interrupt = *update->periodic_interrupt; 3140 3141 if (update->gamut_remap) 3142 stream->gamut_remap_matrix = *update->gamut_remap; 3143 3144 /* Note: this being updated after mode set is currently not a use case 3145 * however if it arises OCSC would need to be reprogrammed at the 3146 * minimum 3147 */ 3148 if (update->output_color_space) 3149 stream->output_color_space = 
*update->output_color_space; 3150 3151 if (update->output_csc_transform) 3152 stream->csc_color_matrix = *update->output_csc_transform; 3153 3154 if (update->vrr_infopacket) 3155 stream->vrr_infopacket = *update->vrr_infopacket; 3156 3157 if (update->hw_cursor_req) 3158 stream->hw_cursor_req = *update->hw_cursor_req; 3159 3160 if (update->allow_freesync) 3161 stream->allow_freesync = *update->allow_freesync; 3162 3163 if (update->vrr_active_variable) 3164 stream->vrr_active_variable = *update->vrr_active_variable; 3165 3166 if (update->vrr_active_fixed) 3167 stream->vrr_active_fixed = *update->vrr_active_fixed; 3168 3169 if (update->crtc_timing_adjust) { 3170 if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min || 3171 stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max) 3172 update->crtc_timing_adjust->timing_adjust_pending = true; 3173 stream->adjust = *update->crtc_timing_adjust; 3174 update->crtc_timing_adjust->timing_adjust_pending = false; 3175 } 3176 3177 if (update->dpms_off) 3178 stream->dpms_off = *update->dpms_off; 3179 3180 if (update->hfvsif_infopacket) 3181 stream->hfvsif_infopacket = *update->hfvsif_infopacket; 3182 3183 if (update->vtem_infopacket) 3184 stream->vtem_infopacket = *update->vtem_infopacket; 3185 3186 if (update->vsc_infopacket) 3187 stream->vsc_infopacket = *update->vsc_infopacket; 3188 3189 if (update->vsp_infopacket) 3190 stream->vsp_infopacket = *update->vsp_infopacket; 3191 3192 if (update->adaptive_sync_infopacket) 3193 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; 3194 3195 if (update->dither_option) 3196 stream->dither_option = *update->dither_option; 3197 3198 if (update->pending_test_pattern) 3199 stream->test_pattern = *update->pending_test_pattern; 3200 /* update current stream with writeback info */ 3201 if (update->wb_update) { 3202 int i; 3203 3204 stream->num_wb_info = update->wb_update->num_wb_info; 3205 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); 3206 for (i = 0; i < stream->num_wb_info; i++) 3207 stream->writeback_info[i] = 3208 update->wb_update->writeback_info[i]; 3209 } 3210 if (update->dsc_config) { 3211 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; 3212 uint32_t old_dsc_enabled = stream->timing.flags.DSC; 3213 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 3214 update->dsc_config->num_slices_v != 0); 3215 3216 /* Use temporarry context for validating new DSC config */ 3217 struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state); 3218 3219 if (dsc_validate_context) { 3220 stream->timing.dsc_cfg = *update->dsc_config; 3221 stream->timing.flags.DSC = enable_dsc; 3222 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { 3223 stream->timing.dsc_cfg = old_dsc_cfg; 3224 stream->timing.flags.DSC = old_dsc_enabled; 3225 update->dsc_config = NULL; 3226 } 3227 3228 dc_state_release(dsc_validate_context); 3229 } else { 3230 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 3231 update->dsc_config = NULL; 3232 } 3233 } 3234 if (update->scaler_sharpener_update) 3235 stream->scaler_sharpener_update = *update->scaler_sharpener_update; 3236 if (update->sharpening_required) 3237 stream->sharpening_required = *update->sharpening_required; 3238 } 3239 3240 static void backup_planes_and_stream_state( 3241 struct dc_scratch_space *scratch, 3242 struct dc_stream_state *stream) 3243 { 3244 int i; 3245 struct dc_stream_status *status = dc_stream_get_status(stream); 3246 3247 if (!status) 3248 
return; 3249 3250 for (i = 0; i < status->plane_count; i++) { 3251 scratch->plane_states[i] = *status->plane_states[i]; 3252 } 3253 scratch->stream_state = *stream; 3254 } 3255 3256 static void restore_planes_and_stream_state( 3257 struct dc_scratch_space *scratch, 3258 struct dc_stream_state *stream) 3259 { 3260 int i; 3261 struct dc_stream_status *status = dc_stream_get_status(stream); 3262 3263 if (!status) 3264 return; 3265 3266 for (i = 0; i < status->plane_count; i++) { 3267 /* refcount will always be valid, restore everything else */ 3268 struct kref refcount = status->plane_states[i]->refcount; 3269 *status->plane_states[i] = scratch->plane_states[i]; 3270 status->plane_states[i]->refcount = refcount; 3271 } 3272 *stream = scratch->stream_state; 3273 } 3274 3275 /** 3276 * update_seamless_boot_flags() - Helper function for updating seamless boot flags 3277 * 3278 * @dc: Current DC state 3279 * @context: New DC state to be programmed 3280 * @surface_count: Number of surfaces being updated 3281 * @stream: Corresponding stream to be updated in the current flip 3282 * 3283 * Updating seamless boot flags does not need to be part of the commit sequence. This 3284 * helper function will update the seamless boot flags on each flip (if required) 3285 * outside of the HW commit sequence (fast or slow). 3286 * 3287 * Return: void 3288 */ 3289 static void update_seamless_boot_flags(struct dc *dc, 3290 struct dc_state *context, 3291 int surface_count, 3292 struct dc_stream_state *stream) 3293 { 3294 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { 3295 /* The seamless boot optimization flag keeps clocks and watermarks high until 3296 * the first flip. After the first flip, optimization is required to lower 3297 * bandwidth. Note that UEFI is expected to 3298 * only light up a single display on POST, therefore we only expect 3299 * one stream with the seamless boot flag set. 3300 */ 3301 if (stream->apply_seamless_boot_optimization) { 3302 stream->apply_seamless_boot_optimization = false; 3303 3304 if (get_seamless_boot_stream_count(context) == 0) 3305 dc->optimized_required = true; 3306 } 3307 } 3308 } 3309 3310 /** 3311 * update_planes_and_stream_state() - The function takes planes and stream 3312 * updates as inputs and determines the appropriate update type. If update type 3313 * is FULL, the function allocates a new context, populates and validates it. 3314 * Otherwise, it updates the current dc context. The function will return both 3315 * new_context and new_update_type back to the caller. The function also backs 3316 * up both current and new contexts into corresponding dc state scratch memory. 3317 * TODO: The function does too many things, and even conditionally allocates dc 3318 * context memory implicitly. We should consider breaking it down. 3319 * 3320 * @dc: Current DC state 3321 * @srf_updates: An array of surface updates 3322 * @surface_count: Surface update count 3323 * @stream: Corresponding stream to be updated 3324 * @stream_update: Stream update 3325 * @new_update_type: [out] update type determined by the function 3326 * @new_context: [out] new context allocated and validated if update type is 3327 * FULL, or a reference to the current context if the update type is less than FULL. 3328 * 3329 * Return: true if a valid update is populated into new_context, false 3330 * otherwise.
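 *
 * Illustrative call shape (a sketch, not the exact in-tree sequence; the
 * dc_update_planes_and_stream() and dc_commit_updates_for_stream() paths are
 * the real users):
 *
 *	enum surface_update_type type;
 *	struct dc_state *ctx;
 *
 *	if (update_planes_and_stream_state(dc, srf_updates, surface_count,
 *			stream, stream_update, &type, &ctx)) {
 *		// program hardware using ctx; for UPDATE_TYPE_FULL, ctx is a
 *		// newly created and validated copy of dc->current_state
 *	}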
3331 */ 3332 static bool update_planes_and_stream_state(struct dc *dc, 3333 struct dc_surface_update *srf_updates, int surface_count, 3334 struct dc_stream_state *stream, 3335 struct dc_stream_update *stream_update, 3336 enum surface_update_type *new_update_type, 3337 struct dc_state **new_context) 3338 { 3339 struct dc_state *context; 3340 int i, j; 3341 enum surface_update_type update_type; 3342 const struct dc_stream_status *stream_status; 3343 struct dc_context *dc_ctx = dc->ctx; 3344 3345 stream_status = dc_stream_get_status(stream); 3346 3347 if (!stream_status) { 3348 if (surface_count) /* Only an error condition if surf_count non-zero*/ 3349 ASSERT(false); 3350 3351 return false; /* Cannot commit surface to stream that is not committed */ 3352 } 3353 3354 context = dc->current_state; 3355 update_type = dc_check_update_surfaces_for_stream( 3356 dc, srf_updates, surface_count, stream_update, stream_status); 3357 /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. 3358 * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip 3359 * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. 3360 */ 3361 force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); 3362 if (update_type == UPDATE_TYPE_FULL) 3363 backup_planes_and_stream_state(&dc->scratch.current_state, stream); 3364 3365 /* update current stream with the new updates */ 3366 copy_stream_update_to_stream(dc, context, stream, stream_update); 3367 3368 /* do not perform surface update if surface has invalid dimensions 3369 * (all zero) and no scaling_info is provided 3370 */ 3371 if (surface_count > 0) { 3372 for (i = 0; i < surface_count; i++) { 3373 if ((srf_updates[i].surface->src_rect.width == 0 || 3374 srf_updates[i].surface->src_rect.height == 0 || 3375 srf_updates[i].surface->dst_rect.width == 0 || 3376 srf_updates[i].surface->dst_rect.height == 0) && 3377 (!srf_updates[i].scaling_info || 3378 srf_updates[i].scaling_info->src_rect.width == 0 || 3379 srf_updates[i].scaling_info->src_rect.height == 0 || 3380 srf_updates[i].scaling_info->dst_rect.width == 0 || 3381 srf_updates[i].scaling_info->dst_rect.height == 0)) { 3382 DC_ERROR("Invalid src/dst rects in surface update!\n"); 3383 return false; 3384 } 3385 } 3386 } 3387 3388 if (update_type >= update_surface_trace_level) 3389 update_surface_trace(dc, srf_updates, surface_count); 3390 3391 for (i = 0; i < surface_count; i++) 3392 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]); 3393 3394 if (update_type >= UPDATE_TYPE_FULL) { 3395 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3396 3397 for (i = 0; i < surface_count; i++) 3398 new_planes[i] = srf_updates[i].surface; 3399 3400 /* initialize scratch memory for building context */ 3401 context = dc_state_create_copy(dc->current_state); 3402 if (context == NULL) { 3403 DC_ERROR("Failed to allocate new validate context!\n"); 3404 return false; 3405 } 3406 3407 /* For each full update, remove all existing phantom pipes first. 
3408 * Ensures that we have enough pipes for newly added MPO planes 3409 */ 3410 dc_state_remove_phantom_streams_and_planes(dc, context); 3411 dc_state_release_phantom_streams_and_planes(dc, context); 3412 3413 /*remove old surfaces from context */ 3414 if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) { 3415 3416 BREAK_TO_DEBUGGER(); 3417 goto fail; 3418 } 3419 3420 /* add surface to context */ 3421 if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3422 3423 BREAK_TO_DEBUGGER(); 3424 goto fail; 3425 } 3426 } 3427 3428 /* save update parameters into surface */ 3429 for (i = 0; i < surface_count; i++) { 3430 struct dc_plane_state *surface = srf_updates[i].surface; 3431 3432 if (update_type != UPDATE_TYPE_MED) 3433 continue; 3434 if (surface->update_flags.bits.position_change) { 3435 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3436 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3437 3438 if (pipe_ctx->plane_state != surface) 3439 continue; 3440 3441 resource_build_scaling_params(pipe_ctx); 3442 } 3443 } 3444 } 3445 3446 if (update_type == UPDATE_TYPE_FULL) { 3447 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3448 BREAK_TO_DEBUGGER(); 3449 goto fail; 3450 } 3451 } 3452 update_seamless_boot_flags(dc, context, surface_count, stream); 3453 3454 *new_context = context; 3455 *new_update_type = update_type; 3456 if (update_type == UPDATE_TYPE_FULL) 3457 backup_planes_and_stream_state(&dc->scratch.new_state, stream); 3458 3459 return true; 3460 3461 fail: 3462 dc_state_release(context); 3463 3464 return false; 3465 3466 } 3467 3468 static void commit_planes_do_stream_update(struct dc *dc, 3469 struct dc_stream_state *stream, 3470 struct dc_stream_update *stream_update, 3471 enum surface_update_type update_type, 3472 struct dc_state *context) 3473 { 3474 int j; 3475 3476 // Stream updates 3477 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3478 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3479 3480 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) { 3481 3482 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3483 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3484 3485 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3486 stream_update->vrr_infopacket || 3487 stream_update->vsc_infopacket || 3488 stream_update->vsp_infopacket || 3489 stream_update->hfvsif_infopacket || 3490 stream_update->adaptive_sync_infopacket || 3491 stream_update->vtem_infopacket) { 3492 resource_build_info_frame(pipe_ctx); 3493 dc->hwss.update_info_frame(pipe_ctx); 3494 3495 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3496 dc->link_srv->dp_trace_source_sequence( 3497 pipe_ctx->stream->link, 3498 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3499 } 3500 3501 if (stream_update->hdr_static_metadata && 3502 stream->use_dynamic_meta && 3503 dc->hwss.set_dmdata_attributes && 3504 pipe_ctx->stream->dmdata_address.quad_part != 0) 3505 dc->hwss.set_dmdata_attributes(pipe_ctx); 3506 3507 if (stream_update->gamut_remap) 3508 dc_stream_set_gamut_remap(dc, stream); 3509 3510 if (stream_update->output_csc_transform) 3511 dc_stream_program_csc_matrix(dc, stream); 3512 3513 if (stream_update->dither_option) { 3514 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3515 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3516 &pipe_ctx->stream->bit_depth_params); 3517 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3518 
&stream->bit_depth_params, 3519 &stream->clamping); 3520 while (odm_pipe) { 3521 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3522 &stream->bit_depth_params, 3523 &stream->clamping); 3524 odm_pipe = odm_pipe->next_odm_pipe; 3525 } 3526 } 3527 3528 if (stream_update->cursor_attributes) 3529 program_cursor_attributes(dc, stream); 3530 3531 if (stream_update->cursor_position) 3532 program_cursor_position(dc, stream); 3533 3534 /* Full fe update*/ 3535 if (update_type == UPDATE_TYPE_FAST) 3536 continue; 3537 3538 if (stream_update->dsc_config) 3539 dc->link_srv->update_dsc_config(pipe_ctx); 3540 3541 if (stream_update->mst_bw_update) { 3542 if (stream_update->mst_bw_update->is_increase) 3543 dc->link_srv->increase_mst_payload(pipe_ctx, 3544 stream_update->mst_bw_update->mst_stream_bw); 3545 else 3546 dc->link_srv->reduce_mst_payload(pipe_ctx, 3547 stream_update->mst_bw_update->mst_stream_bw); 3548 } 3549 3550 if (stream_update->pending_test_pattern) { 3551 /* 3552 * test pattern params depends on ODM topology 3553 * changes that we could be applying to front 3554 * end. Since at the current stage front end 3555 * changes are not yet applied. We can only 3556 * apply test pattern in hw based on current 3557 * state and populate the final test pattern 3558 * params in new state. If current and new test 3559 * pattern params are different as result of 3560 * different ODM topology being used, it will be 3561 * detected and handle during front end 3562 * programming update. 3563 */ 3564 dc->link_srv->dp_set_test_pattern(stream->link, 3565 stream->test_pattern.type, 3566 stream->test_pattern.color_space, 3567 stream->test_pattern.p_link_settings, 3568 stream->test_pattern.p_custom_pattern, 3569 stream->test_pattern.cust_pattern_size); 3570 resource_build_test_pattern_params(&context->res_ctx, pipe_ctx); 3571 } 3572 3573 if (stream_update->dpms_off) { 3574 if (*stream_update->dpms_off) { 3575 dc->link_srv->set_dpms_off(pipe_ctx); 3576 /* for dpms, keep acquired resources*/ 3577 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3578 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3579 3580 dc->optimized_required = true; 3581 3582 } else { 3583 if (get_seamless_boot_stream_count(context) == 0) 3584 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3585 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3586 } 3587 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space 3588 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) { 3589 /* 3590 * Workaround for firmware issue in some receivers where they don't pick up 3591 * correct output color space unless DP link is disabled/re-enabled 3592 */ 3593 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3594 } 3595 3596 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3597 bool should_program_abm = true; 3598 3599 // if otg funcs defined check if blanked before programming 3600 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3601 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3602 should_program_abm = false; 3603 3604 if (should_program_abm) { 3605 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3606 dc->hwss.set_abm_immediate_disable(pipe_ctx); 3607 } else { 3608 pipe_ctx->stream_res.abm->funcs->set_abm_level( 3609 pipe_ctx->stream_res.abm, stream->abm_level); 3610 } 3611 } 3612 } 3613 } 3614 } 3615 } 3616 3617 static bool 
dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3618 { 3619 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3620 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3621 && stream->ctx->dce_version >= DCN_VERSION_3_1) 3622 return true; 3623 3624 if (stream->link->replay_settings.config.replay_supported) 3625 return true; 3626 3627 if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level) 3628 return true; 3629 3630 return false; 3631 } 3632 3633 void dc_dmub_update_dirty_rect(struct dc *dc, 3634 int surface_count, 3635 struct dc_stream_state *stream, 3636 struct dc_surface_update *srf_updates, 3637 struct dc_state *context) 3638 { 3639 union dmub_rb_cmd cmd; 3640 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3641 unsigned int i, j; 3642 unsigned int panel_inst = 0; 3643 3644 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3645 return; 3646 3647 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3648 return; 3649 3650 memset(&cmd, 0x0, sizeof(cmd)); 3651 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3652 cmd.update_dirty_rect.header.sub_type = 0; 3653 cmd.update_dirty_rect.header.payload_bytes = 3654 sizeof(cmd.update_dirty_rect) - 3655 sizeof(cmd.update_dirty_rect.header); 3656 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3657 for (i = 0; i < surface_count; i++) { 3658 struct dc_plane_state *plane_state = srf_updates[i].surface; 3659 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3660 3661 if (!srf_updates[i].surface || !flip_addr) 3662 continue; 3663 /* Do not send in immediate flip mode */ 3664 if (srf_updates[i].surface->flip_immediate) 3665 continue; 3666 3667 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3668 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3669 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3670 sizeof(flip_addr->dirty_rects)); 3671 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3672 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3673 3674 if (pipe_ctx->stream != stream) 3675 continue; 3676 if (pipe_ctx->plane_state != plane_state) 3677 continue; 3678 3679 update_dirty_rect->panel_inst = panel_inst; 3680 update_dirty_rect->pipe_idx = j; 3681 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 3682 } 3683 } 3684 } 3685 3686 static void build_dmub_update_dirty_rect( 3687 struct dc *dc, 3688 int surface_count, 3689 struct dc_stream_state *stream, 3690 struct dc_surface_update *srf_updates, 3691 struct dc_state *context, 3692 struct dc_dmub_cmd dc_dmub_cmd[], 3693 unsigned int *dmub_cmd_count) 3694 { 3695 union dmub_rb_cmd cmd; 3696 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3697 unsigned int i, j; 3698 unsigned int panel_inst = 0; 3699 3700 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3701 return; 3702 3703 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3704 return; 3705 3706 memset(&cmd, 0x0, sizeof(cmd)); 3707 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3708 cmd.update_dirty_rect.header.sub_type = 0; 3709 cmd.update_dirty_rect.header.payload_bytes = 3710 sizeof(cmd.update_dirty_rect) - 3711 sizeof(cmd.update_dirty_rect.header); 3712 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3713 for (i = 0; i < surface_count; i++) { 3714 struct dc_plane_state *plane_state = srf_updates[i].surface; 3715 const struct dc_flip_addrs *flip_addr = 
srf_updates[i].flip_addr; 3716 3717 if (!srf_updates[i].surface || !flip_addr) 3718 continue; 3719 /* Do not send in immediate flip mode */ 3720 if (srf_updates[i].surface->flip_immediate) 3721 continue; 3722 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3723 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3724 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3725 sizeof(flip_addr->dirty_rects)); 3726 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3727 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3728 3729 if (pipe_ctx->stream != stream) 3730 continue; 3731 if (pipe_ctx->plane_state != plane_state) 3732 continue; 3733 update_dirty_rect->panel_inst = panel_inst; 3734 update_dirty_rect->pipe_idx = j; 3735 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; 3736 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; 3737 (*dmub_cmd_count)++; 3738 } 3739 } 3740 } 3741 3742 static bool check_address_only_update(union surface_update_flags update_flags) 3743 { 3744 union surface_update_flags addr_only_update_flags; 3745 addr_only_update_flags.raw = 0; 3746 addr_only_update_flags.bits.addr_update = 1; 3747 3748 return update_flags.bits.addr_update && 3749 !(update_flags.raw & ~addr_only_update_flags.raw); 3750 } 3751 3752 /** 3753 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB 3754 * 3755 * @dc: Current DC state 3756 * @srf_updates: Array of surface updates 3757 * @surface_count: Number of surfaces that have an update 3758 * @stream: Corresponding stream to be updated in the current flip 3759 * @context: New DC state to be programmed 3760 * 3761 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB 3762 * @dmub_cmd_count: Count indicating the number of DMCUB commands in the dc_dmub_cmd array 3763 * 3764 * This function builds an array of DMCUB commands to be sent to DMCUB, so that they can 3765 * all be sent together while the OTG lock is acquired.
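 *
 * Example (illustrative sketch only, mirroring the fast-commit caller in
 * commit_planes_for_stream_fast() further below; the local variable names
 * come from that caller and no new driver interfaces are assumed):
 *
 *	build_dmub_cmd_list(dc, srf_updates, surface_count, stream, context,
 *			context->dc_dmub_cmd, &context->dmub_cmd_count);
 *	hwss_build_fast_sequence(dc, context->dc_dmub_cmd, context->dmub_cmd_count,
 *			context->block_sequence, &context->block_sequence_steps,
 *			top_pipe_to_program, stream_status, context);
 *	hwss_execute_sequence(dc, context->block_sequence,
 *			context->block_sequence_steps);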
3766 * 3767 * Return: void 3768 */ 3769 static void build_dmub_cmd_list(struct dc *dc, 3770 struct dc_surface_update *srf_updates, 3771 int surface_count, 3772 struct dc_stream_state *stream, 3773 struct dc_state *context, 3774 struct dc_dmub_cmd dc_dmub_cmd[], 3775 unsigned int *dmub_cmd_count) 3776 { 3777 // Initialize cmd count to 0 3778 *dmub_cmd_count = 0; 3779 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); 3780 } 3781 3782 static void commit_plane_for_stream_offload_fams2_flip(struct dc *dc, 3783 struct dc_surface_update *srf_updates, 3784 int surface_count, 3785 struct dc_stream_state *stream, 3786 struct dc_state *context) 3787 { 3788 int i, j; 3789 3790 /* update dirty rect for PSR */ 3791 dc_dmub_update_dirty_rect(dc, surface_count, stream, 3792 srf_updates, context); 3793 3794 /* Perform requested Updates */ 3795 for (i = 0; i < surface_count; i++) { 3796 struct dc_plane_state *plane_state = srf_updates[i].surface; 3797 3798 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3799 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3800 3801 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3802 continue; 3803 3804 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3805 continue; 3806 3807 /* update pipe context for plane */ 3808 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 3809 dc->hwss.update_plane_addr(dc, pipe_ctx); 3810 } 3811 } 3812 3813 /* Send commands to DMCUB */ 3814 dc_dmub_srv_fams2_passthrough_flip(dc, 3815 context, 3816 stream, 3817 srf_updates, 3818 surface_count); 3819 } 3820 3821 static void commit_planes_for_stream_fast(struct dc *dc, 3822 struct dc_surface_update *srf_updates, 3823 int surface_count, 3824 struct dc_stream_state *stream, 3825 struct dc_stream_update *stream_update, 3826 enum surface_update_type update_type, 3827 struct dc_state *context) 3828 { 3829 int i, j; 3830 struct pipe_ctx *top_pipe_to_program = NULL; 3831 struct dc_stream_status *stream_status = NULL; 3832 bool should_offload_fams2_flip = false; 3833 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3834 3835 if (should_lock_all_pipes) 3836 determine_pipe_unlock_order(dc, context); 3837 3838 if (dc->debug.fams2_config.bits.enable && 3839 dc->debug.fams2_config.bits.enable_offload_flip && 3840 dc_state_is_fams2_in_use(dc, context)) { 3841 /* if not offloading to HWFQ, offload to FAMS2 if needed */ 3842 should_offload_fams2_flip = true; 3843 for (i = 0; i < surface_count; i++) { 3844 if (srf_updates[i].surface && 3845 srf_updates[i].surface->update_flags.raw && 3846 !check_address_only_update(srf_updates[i].surface->update_flags)) { 3847 /* more than address update, need to acquire FAMS2 lock */ 3848 should_offload_fams2_flip = false; 3849 break; 3850 } 3851 } 3852 if (stream_update) { 3853 /* more than address update, need to acquire FAMS2 lock */ 3854 should_offload_fams2_flip = false; 3855 } 3856 } 3857 3858 dc_exit_ips_for_hw_access(dc); 3859 3860 dc_z10_restore(dc); 3861 3862 top_pipe_to_program = resource_get_otg_master_for_stream( 3863 &context->res_ctx, 3864 stream); 3865 3866 if (!top_pipe_to_program) 3867 return; 3868 3869 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3870 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3871 3872 if (pipe->stream && pipe->plane_state) { 3873 if (!dc->debug.using_dml2) 3874 set_p_state_switch_method(dc, context, pipe); 3875 3876 if (dc->debug.visual_confirm) 3877 dc_update_visual_confirm_color(dc, context, pipe); 3878 
} 3879 } 3880 3881 for (i = 0; i < surface_count; i++) { 3882 struct dc_plane_state *plane_state = srf_updates[i].surface; 3883 /*set logical flag for lock/unlock use*/ 3884 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3885 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3886 3887 if (!pipe_ctx->plane_state) 3888 continue; 3889 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3890 continue; 3891 3892 pipe_ctx->plane_state->triplebuffer_flips = false; 3893 if (update_type == UPDATE_TYPE_FAST && 3894 dc->hwss.program_triplebuffer != NULL && 3895 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 3896 /*triple buffer for VUpdate only*/ 3897 pipe_ctx->plane_state->triplebuffer_flips = true; 3898 } 3899 } 3900 } 3901 3902 stream_status = dc_state_get_stream_status(context, stream); 3903 3904 if (should_offload_fams2_flip) { 3905 commit_plane_for_stream_offload_fams2_flip(dc, 3906 srf_updates, 3907 surface_count, 3908 stream, 3909 context); 3910 } else if (stream_status) { 3911 build_dmub_cmd_list(dc, 3912 srf_updates, 3913 surface_count, 3914 stream, 3915 context, 3916 context->dc_dmub_cmd, 3917 &(context->dmub_cmd_count)); 3918 hwss_build_fast_sequence(dc, 3919 context->dc_dmub_cmd, 3920 context->dmub_cmd_count, 3921 context->block_sequence, 3922 &(context->block_sequence_steps), 3923 top_pipe_to_program, 3924 stream_status, 3925 context); 3926 hwss_execute_sequence(dc, 3927 context->block_sequence, 3928 context->block_sequence_steps); 3929 } 3930 3931 /* Clear update flags so next flip doesn't have redundant programming 3932 * (if there's no stream update, the update flags are not cleared). 3933 * Surface updates are cleared unconditionally at the beginning of each flip, 3934 * so no need to clear here. 
3935 */ 3936 if (top_pipe_to_program->stream) 3937 top_pipe_to_program->stream->update_flags.raw = 0; 3938 } 3939 3940 static void commit_planes_for_stream(struct dc *dc, 3941 struct dc_surface_update *srf_updates, 3942 int surface_count, 3943 struct dc_stream_state *stream, 3944 struct dc_stream_update *stream_update, 3945 enum surface_update_type update_type, 3946 struct dc_state *context) 3947 { 3948 int i, j; 3949 struct pipe_ctx *top_pipe_to_program = NULL; 3950 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3951 bool subvp_prev_use = false; 3952 bool subvp_curr_use = false; 3953 uint8_t current_stream_mask = 0; 3954 3955 if (should_lock_all_pipes) 3956 determine_pipe_unlock_order(dc, context); 3957 // Once we apply the new subvp context to hardware it won't be in the 3958 // dc->current_state anymore, so we have to cache it before we apply 3959 // the new SubVP context 3960 subvp_prev_use = false; 3961 dc_exit_ips_for_hw_access(dc); 3962 3963 dc_z10_restore(dc); 3964 if (update_type == UPDATE_TYPE_FULL && dc->optimized_required) 3965 hwss_process_outstanding_hw_updates(dc, dc->current_state); 3966 3967 if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming) 3968 dc->res_pool->funcs->prepare_mcache_programming(dc, context); 3969 3970 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3971 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3972 3973 if (pipe->stream && pipe->plane_state) { 3974 if (!dc->debug.using_dml2) 3975 set_p_state_switch_method(dc, context, pipe); 3976 3977 if (dc->debug.visual_confirm) 3978 dc_update_visual_confirm_color(dc, context, pipe); 3979 } 3980 } 3981 3982 if (update_type == UPDATE_TYPE_FULL) { 3983 dc_allow_idle_optimizations(dc, false); 3984 3985 if (get_seamless_boot_stream_count(context) == 0) 3986 dc->hwss.prepare_bandwidth(dc, context); 3987 3988 if (dc->hwss.update_dsc_pg) 3989 dc->hwss.update_dsc_pg(dc, context, false); 3990 3991 context_clock_trace(dc, context); 3992 } 3993 3994 if (update_type == UPDATE_TYPE_FULL) 3995 hwss_wait_for_outstanding_hw_updates(dc, dc->current_state); 3996 3997 top_pipe_to_program = resource_get_otg_master_for_stream( 3998 &context->res_ctx, 3999 stream); 4000 ASSERT(top_pipe_to_program != NULL); 4001 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4002 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4003 4004 // Check old context for SubVP 4005 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 4006 if (subvp_prev_use) 4007 break; 4008 } 4009 4010 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4011 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 4012 4013 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 4014 subvp_curr_use = true; 4015 break; 4016 } 4017 } 4018 4019 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 4020 struct pipe_ctx *mpcc_pipe; 4021 struct pipe_ctx *odm_pipe; 4022 4023 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 4024 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 4025 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 4026 } 4027 4028 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4029 if (top_pipe_to_program && 4030 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 4031 if (should_use_dmub_lock(stream->link)) { 4032 union dmub_hw_lock_flags hw_locks = { 0 }; 4033 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 4034 4035 
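				/* Acquire the DMUB HW lock (lock_dig) around the DSC double-buffer
				 * update; the matching release (dmub_hw_lock_mgr_cmd with false, or
				 * lock_doublebuffer_disable) is issued further below once the OTG has
				 * been stepped through VACTIVE/VBLANK.
				 */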
hw_locks.bits.lock_dig = 1; 4036 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 4037 4038 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 4039 true, 4040 &hw_locks, 4041 &inst_flags); 4042 } else 4043 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 4044 top_pipe_to_program->stream_res.tg); 4045 } 4046 4047 if (dc->hwss.wait_for_dcc_meta_propagation) { 4048 dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program); 4049 } 4050 4051 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4052 if (dc->hwss.subvp_pipe_control_lock) 4053 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 4054 4055 if (dc->hwss.fams2_global_control_lock) 4056 dc->hwss.fams2_global_control_lock(dc, context, true); 4057 4058 dc->hwss.interdependent_update_lock(dc, context, true); 4059 } else { 4060 if (dc->hwss.subvp_pipe_control_lock) 4061 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 4062 4063 if (dc->hwss.fams2_global_control_lock) 4064 dc->hwss.fams2_global_control_lock(dc, context, true); 4065 4066 /* Lock the top pipe while updating plane addrs, since freesync requires 4067 * plane addr update event triggers to be synchronized. 4068 * top_pipe_to_program is expected to never be NULL 4069 */ 4070 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 4071 } 4072 4073 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 4074 4075 // Stream updates 4076 if (stream_update) 4077 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 4078 4079 if (surface_count == 0) { 4080 /* 4081 * In case of turning off screen, no need to program front end a second time. 4082 * just return after program blank. 4083 */ 4084 if (dc->hwss.apply_ctx_for_surface) 4085 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 4086 if (dc->hwss.program_front_end_for_ctx) 4087 dc->hwss.program_front_end_for_ctx(dc, context); 4088 4089 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4090 dc->hwss.interdependent_update_lock(dc, context, false); 4091 } else { 4092 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 4093 } 4094 dc->hwss.post_unlock_program_front_end(dc, context); 4095 4096 if (update_type != UPDATE_TYPE_FAST) 4097 if (dc->hwss.commit_subvp_config) 4098 dc->hwss.commit_subvp_config(dc, context); 4099 4100 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 4101 * move the SubVP lock to after the phantom pipes have been setup 4102 */ 4103 if (dc->hwss.subvp_pipe_control_lock) 4104 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, 4105 NULL, subvp_prev_use); 4106 4107 if (dc->hwss.fams2_global_control_lock) 4108 dc->hwss.fams2_global_control_lock(dc, context, false); 4109 4110 return; 4111 } 4112 4113 if (update_type != UPDATE_TYPE_FAST) { 4114 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4115 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4116 4117 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || 4118 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && 4119 pipe_ctx->stream && pipe_ctx->plane_state) { 4120 /* Only update visual confirm for SUBVP and Mclk switching here. 4121 * The bar appears on all pipes, so we need to update the bar on all displays, 4122 * so the information doesn't get stale. 
4123 */ 4124 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, 4125 pipe_ctx->plane_res.hubp->inst); 4126 } 4127 } 4128 } 4129 4130 for (i = 0; i < surface_count; i++) { 4131 struct dc_plane_state *plane_state = srf_updates[i].surface; 4132 4133 /*set logical flag for lock/unlock use*/ 4134 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4135 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4136 if (!pipe_ctx->plane_state) 4137 continue; 4138 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4139 continue; 4140 pipe_ctx->plane_state->triplebuffer_flips = false; 4141 if (update_type == UPDATE_TYPE_FAST && 4142 dc->hwss.program_triplebuffer != NULL && 4143 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 4144 /*triple buffer for VUpdate only*/ 4145 pipe_ctx->plane_state->triplebuffer_flips = true; 4146 } 4147 } 4148 if (update_type == UPDATE_TYPE_FULL) { 4149 /* force vsync flip when reconfiguring pipes to prevent underflow */ 4150 plane_state->flip_immediate = false; 4151 plane_state->triplebuffer_flips = false; 4152 } 4153 } 4154 4155 // Update Type FULL, Surface updates 4156 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4157 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4158 4159 if (!pipe_ctx->top_pipe && 4160 !pipe_ctx->prev_odm_pipe && 4161 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 4162 struct dc_stream_status *stream_status = NULL; 4163 4164 if (!pipe_ctx->plane_state) 4165 continue; 4166 4167 /* Full fe update*/ 4168 if (update_type == UPDATE_TYPE_FAST) 4169 continue; 4170 4171 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 4172 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 4173 /*turn off triple buffer for full update*/ 4174 dc->hwss.program_triplebuffer( 4175 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 4176 } 4177 stream_status = 4178 stream_get_status(context, pipe_ctx->stream); 4179 4180 if (dc->hwss.apply_ctx_for_surface && stream_status) 4181 dc->hwss.apply_ctx_for_surface( 4182 dc, pipe_ctx->stream, stream_status->plane_count, context); 4183 } 4184 } 4185 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 4186 dc->hwss.program_front_end_for_ctx(dc, context); 4187 if (dc->debug.validate_dml_output) { 4188 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4189 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 4190 if (cur_pipe->stream == NULL) 4191 continue; 4192 4193 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 4194 cur_pipe->plane_res.hubp, dc->ctx, 4195 &context->res_ctx.pipe_ctx[i].rq_regs, 4196 &context->res_ctx.pipe_ctx[i].dlg_regs, 4197 &context->res_ctx.pipe_ctx[i].ttu_regs); 4198 } 4199 } 4200 } 4201 4202 // Update Type FAST, Surface updates 4203 if (update_type == UPDATE_TYPE_FAST) { 4204 if (dc->hwss.set_flip_control_gsl) 4205 for (i = 0; i < surface_count; i++) { 4206 struct dc_plane_state *plane_state = srf_updates[i].surface; 4207 4208 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4209 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4210 4211 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 4212 continue; 4213 4214 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4215 continue; 4216 4217 // GSL has to be used for flip immediate 4218 dc->hwss.set_flip_control_gsl(pipe_ctx, 4219 pipe_ctx->plane_state->flip_immediate); 4220 } 4221 } 4222 4223 /* Perform requested Updates */ 4224 for (i = 0; i < surface_count; i++) { 4225 struct dc_plane_state 
*plane_state = srf_updates[i].surface; 4226 4227 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4228 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4229 4230 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 4231 continue; 4232 4233 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4234 continue; 4235 4236 if (srf_updates[i].cm2_params && 4237 srf_updates[i].cm2_params->cm2_luts.lut3d_data.lut3d_src == 4238 DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM && 4239 srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting == 4240 DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT && 4241 dc->hwss.trigger_3dlut_dma_load) 4242 dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx); 4243 4244 /*program triple buffer after lock based on flip type*/ 4245 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 4246 /*only enable triplebuffer for fast_update*/ 4247 dc->hwss.program_triplebuffer( 4248 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 4249 } 4250 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 4251 dc->hwss.update_plane_addr(dc, pipe_ctx); 4252 } 4253 } 4254 } 4255 4256 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4257 dc->hwss.interdependent_update_lock(dc, context, false); 4258 } else { 4259 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 4260 } 4261 4262 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4263 if (top_pipe_to_program && 4264 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 4265 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4266 top_pipe_to_program->stream_res.tg, 4267 CRTC_STATE_VACTIVE); 4268 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4269 top_pipe_to_program->stream_res.tg, 4270 CRTC_STATE_VBLANK); 4271 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4272 top_pipe_to_program->stream_res.tg, 4273 CRTC_STATE_VACTIVE); 4274 4275 if (should_use_dmub_lock(stream->link)) { 4276 union dmub_hw_lock_flags hw_locks = { 0 }; 4277 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 4278 4279 hw_locks.bits.lock_dig = 1; 4280 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 4281 4282 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 4283 false, 4284 &hw_locks, 4285 &inst_flags); 4286 } else 4287 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( 4288 top_pipe_to_program->stream_res.tg); 4289 } 4290 4291 if (subvp_curr_use) { 4292 /* If enabling subvp or transitioning from subvp->subvp, enable the 4293 * phantom streams before we program front end for the phantom pipes. 4294 */ 4295 if (update_type != UPDATE_TYPE_FAST) { 4296 if (dc->hwss.enable_phantom_streams) 4297 dc->hwss.enable_phantom_streams(dc, context); 4298 } 4299 } 4300 4301 if (update_type != UPDATE_TYPE_FAST) 4302 dc->hwss.post_unlock_program_front_end(dc, context); 4303 4304 if (subvp_prev_use && !subvp_curr_use) { 4305 /* If disabling subvp, disable phantom streams after front end 4306 * programming has completed (we turn on phantom OTG in order 4307 * to complete the plane disable for phantom pipes). 
4308 */ 4309 4310 if (dc->hwss.disable_phantom_streams) 4311 dc->hwss.disable_phantom_streams(dc, context); 4312 } 4313 4314 if (update_type != UPDATE_TYPE_FAST) 4315 if (dc->hwss.commit_subvp_config) 4316 dc->hwss.commit_subvp_config(dc, context); 4317 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 4318 * move the SubVP lock to after the phantom pipes have been setup 4319 */ 4320 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4321 if (dc->hwss.subvp_pipe_control_lock) 4322 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 4323 if (dc->hwss.fams2_global_control_lock) 4324 dc->hwss.fams2_global_control_lock(dc, context, false); 4325 } else { 4326 if (dc->hwss.subvp_pipe_control_lock) 4327 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 4328 if (dc->hwss.fams2_global_control_lock) 4329 dc->hwss.fams2_global_control_lock(dc, context, false); 4330 } 4331 4332 // Fire manual trigger only when bottom plane is flipped 4333 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4334 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4335 4336 if (!pipe_ctx->plane_state) 4337 continue; 4338 4339 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 4340 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || 4341 !pipe_ctx->plane_state->update_flags.bits.addr_update || 4342 pipe_ctx->plane_state->skip_manual_trigger) 4343 continue; 4344 4345 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) 4346 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 4347 } 4348 4349 current_stream_mask = get_stream_mask(dc, context); 4350 if (current_stream_mask != context->stream_mask) { 4351 context->stream_mask = current_stream_mask; 4352 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask); 4353 } 4354 } 4355 4356 /** 4357 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change 4358 * 4359 * @dc: Used to get the current state status 4360 * @stream: Target stream from which we want to remove the attached planes 4361 * @srf_updates: Array of surface updates 4362 * @surface_count: Number of surface updates 4363 * @is_plane_addition: [out] Set to true if it is a plane addition case 4364 * 4365 * DCN32x and newer support a feature named Dynamic ODM which can conflict with 4366 * MPO if used simultaneously in some specific configurations (e.g., 4367 * 4k@144). This function checks if the incoming context requires applying a 4368 * transition state, with unnecessary pipe splitting and ODM disabled, to 4369 * circumvent our hardware limitations and prevent this edge case. If the OPP 4370 * associated with an MPCC might change due to plane additions, this function 4371 * returns true. 4372 * 4373 * Return: 4374 * Return true if the OPP and MPCC might change; otherwise, return false.
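 *
 * Example (illustrative sketch, mirroring the caller in
 * update_planes_and_stream_v2() further below; the local names are
 * illustrative only):
 *
 *	bool is_plane_addition = false;
 *	bool force_split = could_mpcc_tree_change_for_active_pipes(dc, stream,
 *			srf_updates, surface_count, &is_plane_addition);
 *
 *	// on plane addition, the minimal state is based on the current context
 *	if (force_split && is_plane_addition &&
 *			!commit_minimal_transition_state(dc, dc->current_state))
 *		return false;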
4375 */ 4376 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, 4377 struct dc_stream_state *stream, 4378 struct dc_surface_update *srf_updates, 4379 int surface_count, 4380 bool *is_plane_addition) 4381 { 4382 4383 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); 4384 bool force_minimal_pipe_splitting = false; 4385 bool subvp_active = false; 4386 uint32_t i; 4387 4388 *is_plane_addition = false; 4389 4390 if (cur_stream_status && 4391 dc->current_state->stream_count > 0 && 4392 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) { 4393 /* determine if minimal transition is required due to MPC*/ 4394 if (surface_count > 0) { 4395 if (cur_stream_status->plane_count > surface_count) { 4396 force_minimal_pipe_splitting = true; 4397 } else if (cur_stream_status->plane_count < surface_count) { 4398 force_minimal_pipe_splitting = true; 4399 *is_plane_addition = true; 4400 } 4401 } 4402 } 4403 4404 if (cur_stream_status && 4405 dc->current_state->stream_count == 1 && 4406 dc->debug.enable_single_display_2to1_odm_policy) { 4407 /* determine if minimal transition is required due to dynamic ODM*/ 4408 if (surface_count > 0) { 4409 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) { 4410 force_minimal_pipe_splitting = true; 4411 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) { 4412 force_minimal_pipe_splitting = true; 4413 *is_plane_addition = true; 4414 } 4415 } 4416 } 4417 4418 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4419 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4420 4421 if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) { 4422 subvp_active = true; 4423 break; 4424 } 4425 } 4426 4427 /* For SubVP when adding or removing planes we need to add a minimal transition 4428 * (even when disabling all planes). Whenever disabling a phantom pipe, we 4429 * must use the minimal transition path to disable the pipe correctly. 4430 * 4431 * We want to use the minimal transition whenever subvp is active, not only if 4432 * a plane is being added / removed from a subvp stream (MPO plane can be added 4433 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through 4434 * a min transition to disable subvp. 
4435 */ 4436 if (cur_stream_status && subvp_active) { 4437 /* determine if minimal transition is required due to SubVP*/ 4438 if (cur_stream_status->plane_count > surface_count) { 4439 force_minimal_pipe_splitting = true; 4440 } else if (cur_stream_status->plane_count < surface_count) { 4441 force_minimal_pipe_splitting = true; 4442 *is_plane_addition = true; 4443 } 4444 } 4445 4446 return force_minimal_pipe_splitting; 4447 } 4448 4449 struct pipe_split_policy_backup { 4450 bool dynamic_odm_policy; 4451 bool subvp_policy; 4452 enum pipe_split_policy mpc_policy; 4453 char force_odm[MAX_PIPES]; 4454 }; 4455 4456 static void backup_and_set_minimal_pipe_split_policy(struct dc *dc, 4457 struct dc_state *context, 4458 struct pipe_split_policy_backup *policy) 4459 { 4460 int i; 4461 4462 if (!dc->config.is_vmin_only_asic) { 4463 policy->mpc_policy = dc->debug.pipe_split_policy; 4464 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 4465 } 4466 policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; 4467 dc->debug.enable_single_display_2to1_odm_policy = false; 4468 policy->subvp_policy = dc->debug.force_disable_subvp; 4469 dc->debug.force_disable_subvp = true; 4470 for (i = 0; i < context->stream_count; i++) { 4471 policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments; 4472 if (context->streams[i]->debug.allow_transition_for_forced_odm) 4473 context->streams[i]->debug.force_odm_combine_segments = 0; 4474 } 4475 } 4476 4477 static void restore_minimal_pipe_split_policy(struct dc *dc, 4478 struct dc_state *context, 4479 struct pipe_split_policy_backup *policy) 4480 { 4481 uint8_t i; 4482 4483 if (!dc->config.is_vmin_only_asic) 4484 dc->debug.pipe_split_policy = policy->mpc_policy; 4485 dc->debug.enable_single_display_2to1_odm_policy = 4486 policy->dynamic_odm_policy; 4487 dc->debug.force_disable_subvp = policy->subvp_policy; 4488 for (i = 0; i < context->stream_count; i++) 4489 context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i]; 4490 } 4491 4492 static void release_minimal_transition_state(struct dc *dc, 4493 struct dc_state *minimal_transition_context, 4494 struct dc_state *base_context, 4495 struct pipe_split_policy_backup *policy) 4496 { 4497 restore_minimal_pipe_split_policy(dc, base_context, policy); 4498 dc_state_release(minimal_transition_context); 4499 } 4500 4501 static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context) 4502 { 4503 uint8_t i; 4504 int j; 4505 struct dc_stream_status *stream_status; 4506 4507 for (i = 0; i < context->stream_count; i++) { 4508 stream_status = &context->stream_status[i]; 4509 4510 for (j = 0; j < stream_status->plane_count; j++) 4511 stream_status->plane_states[j]->flip_immediate = false; 4512 } 4513 } 4514 4515 static struct dc_state *create_minimal_transition_state(struct dc *dc, 4516 struct dc_state *base_context, struct pipe_split_policy_backup *policy) 4517 { 4518 struct dc_state *minimal_transition_context = NULL; 4519 4520 minimal_transition_context = dc_state_create_copy(base_context); 4521 if (!minimal_transition_context) 4522 return NULL; 4523 4524 backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); 4525 /* commit minimal state */ 4526 if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { 4527 /* prevent underflow and corruption when reconfiguring pipes */ 4528 force_vsync_flip_in_minimal_transition_context(minimal_transition_context); 4529 } else { 4530 /* 4531 * This should never happen, minimal transition 
state should 4532 * always be validated first before adding pipe split features. 4533 */ 4534 release_minimal_transition_state(dc, minimal_transition_context, base_context, policy); 4535 BREAK_TO_DEBUGGER(); 4536 minimal_transition_context = NULL; 4537 } 4538 return minimal_transition_context; 4539 } 4540 4541 static bool is_pipe_topology_transition_seamless_with_intermediate_step( 4542 struct dc *dc, 4543 struct dc_state *initial_state, 4544 struct dc_state *intermediate_state, 4545 struct dc_state *final_state) 4546 { 4547 return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state, 4548 intermediate_state) && 4549 dc->hwss.is_pipe_topology_transition_seamless(dc, 4550 intermediate_state, final_state); 4551 } 4552 4553 static void swap_and_release_current_context(struct dc *dc, 4554 struct dc_state *new_context, struct dc_stream_state *stream) 4555 { 4556 4557 int i; 4558 struct dc_state *old = dc->current_state; 4559 struct pipe_ctx *pipe_ctx; 4560 4561 /* Since memory free requires elevated IRQ, an interrupt 4562 * request is generated by mem free. If this happens 4563 * between freeing and reassigning the context, our vsync 4564 * interrupt will call into dc and cause a memory 4565 * corruption. Hence, we first reassign the context, 4566 * then free the old context. 4567 */ 4568 dc->current_state = new_context; 4569 dc_state_release(old); 4570 4571 // clear any forced full updates 4572 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4573 pipe_ctx = &new_context->res_ctx.pipe_ctx[i]; 4574 4575 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4576 pipe_ctx->plane_state->force_full_update = false; 4577 } 4578 } 4579 4580 static int initialize_empty_surface_updates( 4581 struct dc_stream_state *stream, 4582 struct dc_surface_update *srf_updates) 4583 { 4584 struct dc_stream_status *status = dc_stream_get_status(stream); 4585 int i; 4586 4587 if (!status) 4588 return 0; 4589 4590 for (i = 0; i < status->plane_count; i++) 4591 srf_updates[i].surface = status->plane_states[i]; 4592 4593 return status->plane_count; 4594 } 4595 4596 static bool commit_minimal_transition_based_on_new_context(struct dc *dc, 4597 struct dc_state *new_context, 4598 struct dc_stream_state *stream, 4599 struct dc_surface_update *srf_updates, 4600 int surface_count) 4601 { 4602 bool success = false; 4603 struct pipe_split_policy_backup policy; 4604 struct dc_state *intermediate_context = 4605 create_minimal_transition_state(dc, new_context, 4606 &policy); 4607 4608 if (intermediate_context) { 4609 if (is_pipe_topology_transition_seamless_with_intermediate_step( 4610 dc, 4611 dc->current_state, 4612 intermediate_context, 4613 new_context)) { 4614 DC_LOG_DC("commit minimal transition state: base = new state\n"); 4615 commit_planes_for_stream(dc, srf_updates, 4616 surface_count, stream, NULL, 4617 UPDATE_TYPE_FULL, intermediate_context); 4618 swap_and_release_current_context( 4619 dc, intermediate_context, stream); 4620 dc_state_retain(dc->current_state); 4621 success = true; 4622 } 4623 release_minimal_transition_state( 4624 dc, intermediate_context, new_context, &policy); 4625 } 4626 return success; 4627 } 4628 4629 static bool commit_minimal_transition_based_on_current_context(struct dc *dc, 4630 struct dc_state *new_context, struct dc_stream_state *stream) 4631 { 4632 bool success = false; 4633 struct pipe_split_policy_backup policy; 4634 struct dc_state *intermediate_context; 4635 struct dc_state *old_current_state = dc->current_state; 4636 struct dc_surface_update srf_updates[MAX_SURFACES] = 
{0}; 4637 int surface_count; 4638 4639 /* 4640 * Both current and new contexts share the same stream and plane state 4641 * pointers. When the new context is validated, stream and planes get 4642 * populated with new updates such as new plane addresses. This makes 4643 * the current context no longer valid because stream and planes are 4644 * modified from the original. We back up the current stream and plane states 4645 * into scratch space whenever we populate a new context, so we can 4646 * restore the original values by calling the restore function now. 4647 * This restores the original stream and plane states associated 4648 * with the current state. 4649 */ 4650 restore_planes_and_stream_state(&dc->scratch.current_state, stream); 4651 dc_state_retain(old_current_state); 4652 intermediate_context = create_minimal_transition_state(dc, 4653 old_current_state, &policy); 4654 4655 if (intermediate_context) { 4656 if (is_pipe_topology_transition_seamless_with_intermediate_step( 4657 dc, 4658 dc->current_state, 4659 intermediate_context, 4660 new_context)) { 4661 DC_LOG_DC("commit minimal transition state: base = current state\n"); 4662 surface_count = initialize_empty_surface_updates( 4663 stream, srf_updates); 4664 commit_planes_for_stream(dc, srf_updates, 4665 surface_count, stream, NULL, 4666 UPDATE_TYPE_FULL, intermediate_context); 4667 swap_and_release_current_context( 4668 dc, intermediate_context, stream); 4669 dc_state_retain(dc->current_state); 4670 success = true; 4671 } 4672 release_minimal_transition_state(dc, intermediate_context, 4673 old_current_state, &policy); 4674 } 4675 dc_state_release(old_current_state); 4676 /* 4677 * Restore stream and plane states back to the values associated with the 4678 * new context. 4679 */ 4680 restore_planes_and_stream_state(&dc->scratch.new_state, stream); 4681 return success; 4682 } 4683 4684 /** 4685 * commit_minimal_transition_state_in_dc_update - Commit a minimal state based 4686 * on current or new context 4687 * 4688 * @dc: DC structure, used to get the current state 4689 * @new_context: New context 4690 * @stream: Stream getting the update for the flip 4691 * @srf_updates: Surface updates 4692 * @surface_count: Number of surfaces 4693 * 4694 * The function takes in the current state and the new state and determines a 4695 * minimal transition state as the intermediate step that could make the 4696 * transition between the two seamless. If one is found, it commits the minimal 4697 * transition state, updates the current state to this minimal transition state 4698 * and returns true; if not, it returns false.
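 *
 * Example (illustrative sketch, matching the caller in
 * update_planes_and_stream_v2() further below, where "context" is the newly
 * validated state):
 *
 *	if (dc->hwss.is_pipe_topology_transition_seamless &&
 *	    !dc->hwss.is_pipe_topology_transition_seamless(
 *			dc, dc->current_state, context))
 *		commit_minimal_transition_state_in_dc_update(dc, context,
 *				stream, srf_updates, surface_count);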
4699 * 4700 * Return: 4701 * Return true if the minimal transition succeeded, false otherwise 4702 */ 4703 static bool commit_minimal_transition_state_in_dc_update(struct dc *dc, 4704 struct dc_state *new_context, 4705 struct dc_stream_state *stream, 4706 struct dc_surface_update *srf_updates, 4707 int surface_count) 4708 { 4709 bool success = commit_minimal_transition_based_on_new_context( 4710 dc, new_context, stream, srf_updates, 4711 surface_count); 4712 if (!success) 4713 success = commit_minimal_transition_based_on_current_context(dc, 4714 new_context, stream); 4715 if (!success) 4716 DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n"); 4717 return success; 4718 } 4719 4720 /** 4721 * commit_minimal_transition_state - Create a transition pipe split state 4722 * 4723 * @dc: Used to get the current state status 4724 * @transition_base_context: New transition state 4725 * 4726 * In some specific configurations, such as pipe split on multi-display with 4727 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe 4728 * programming when moving to new planes. To mitigate those types of problems, 4729 * this function adds a transition state that minimizes pipe usage before 4730 * programming the new configuration. When adding a new plane, the current 4731 * state requires the least pipes, so it is applied without splitting. When 4732 * removing a plane, the new state requires the least pipes, so it is applied 4733 * without splitting. 4734 * 4735 * Return: 4736 * Return false if something is wrong in the transition state. 4737 */ 4738 static bool commit_minimal_transition_state(struct dc *dc, 4739 struct dc_state *transition_base_context) 4740 { 4741 struct dc_state *transition_context; 4742 struct pipe_split_policy_backup policy; 4743 enum dc_status ret = DC_ERROR_UNEXPECTED; 4744 unsigned int i, j; 4745 unsigned int pipe_in_use = 0; 4746 bool subvp_in_use = false; 4747 bool odm_in_use = false; 4748 4749 /* check current pipes in use */ 4750 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4751 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 4752 4753 if (pipe->plane_state) 4754 pipe_in_use++; 4755 } 4756 4757 /* If SubVP is enabled and we are adding or removing planes from any main subvp 4758 * pipe, we must use the minimal transition. 4759 */ 4760 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4761 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4762 4763 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { 4764 subvp_in_use = true; 4765 break; 4766 } 4767 } 4768 4769 /* If ODM is enabled and we are adding or removing planes from any ODM 4770 * pipe, we must use the minimal transition. 4771 */ 4772 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4773 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 4774 4775 if (resource_is_pipe_type(pipe, OTG_MASTER)) { 4776 odm_in_use = resource_get_odm_slice_count(pipe) > 1; 4777 break; 4778 } 4779 } 4780 4781 /* When the OS adds a new surface while all pipes are already in use for ODM combine 4782 * or MPC split, we need commit_minimal_transition_state to transition safely. 4783 * After the OS exits MPO it goes back to using ODM and MPC split across all pipes, 4784 * so we need to call it again. Otherwise return true to skip. 4785 * 4786 * This reduces the scenarios that use dc_commit_state_no_check at the flip stage.
Especially 4787 * enter/exit MPO when DCN still have enough resources. 4788 */ 4789 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) 4790 return true; 4791 4792 DC_LOG_DC("%s base = %s state, reason = %s\n", __func__, 4793 dc->current_state == transition_base_context ? "current" : "new", 4794 subvp_in_use ? "Subvp In Use" : 4795 odm_in_use ? "ODM in Use" : 4796 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" : 4797 "Unknown"); 4798 4799 dc_state_retain(transition_base_context); 4800 transition_context = create_minimal_transition_state(dc, 4801 transition_base_context, &policy); 4802 if (transition_context) { 4803 ret = dc_commit_state_no_check(dc, transition_context); 4804 release_minimal_transition_state(dc, transition_context, transition_base_context, &policy); 4805 } 4806 dc_state_release(transition_base_context); 4807 4808 if (ret != DC_OK) { 4809 /* this should never happen */ 4810 BREAK_TO_DEBUGGER(); 4811 return false; 4812 } 4813 4814 /* force full surface update */ 4815 for (i = 0; i < dc->current_state->stream_count; i++) { 4816 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { 4817 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; 4818 } 4819 } 4820 4821 return true; 4822 } 4823 4824 void populate_fast_updates(struct dc_fast_update *fast_update, 4825 struct dc_surface_update *srf_updates, 4826 int surface_count, 4827 struct dc_stream_update *stream_update) 4828 { 4829 int i = 0; 4830 4831 if (stream_update) { 4832 fast_update[0].out_transfer_func = stream_update->out_transfer_func; 4833 fast_update[0].output_csc_transform = stream_update->output_csc_transform; 4834 } else { 4835 fast_update[0].out_transfer_func = NULL; 4836 fast_update[0].output_csc_transform = NULL; 4837 } 4838 4839 for (i = 0; i < surface_count; i++) { 4840 fast_update[i].flip_addr = srf_updates[i].flip_addr; 4841 fast_update[i].gamma = srf_updates[i].gamma; 4842 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; 4843 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; 4844 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; 4845 fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix; 4846 } 4847 } 4848 4849 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count) 4850 { 4851 int i; 4852 4853 if (fast_update[0].out_transfer_func || 4854 fast_update[0].output_csc_transform) 4855 return true; 4856 4857 for (i = 0; i < surface_count; i++) { 4858 if (fast_update[i].flip_addr || 4859 fast_update[i].gamma || 4860 fast_update[i].gamut_remap_matrix || 4861 fast_update[i].input_csc_color_matrix || 4862 fast_update[i].cursor_csc_color_matrix || 4863 fast_update[i].coeff_reduction_factor) 4864 return true; 4865 } 4866 4867 return false; 4868 } 4869 4870 bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count) 4871 { 4872 int i; 4873 4874 if (fast_update[0].out_transfer_func || 4875 fast_update[0].output_csc_transform) 4876 return true; 4877 4878 for (i = 0; i < surface_count; i++) { 4879 if (fast_update[i].input_csc_color_matrix || 4880 fast_update[i].gamma || 4881 fast_update[i].gamut_remap_matrix || 4882 fast_update[i].coeff_reduction_factor || 4883 fast_update[i].cursor_csc_color_matrix) 4884 return true; 4885 } 4886 4887 return false; 4888 } 4889 4890 static bool full_update_required(struct dc *dc, 4891 struct dc_surface_update *srf_updates, 4892 int surface_count, 
4893 struct dc_stream_update *stream_update, 4894 struct dc_stream_state *stream) 4895 { 4896 4897 int i; 4898 struct dc_stream_status *stream_status; 4899 const struct dc_state *context = dc->current_state; 4900 4901 for (i = 0; i < surface_count; i++) { 4902 if (srf_updates && 4903 (srf_updates[i].plane_info || 4904 srf_updates[i].scaling_info || 4905 (srf_updates[i].hdr_mult.value && 4906 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || 4907 (srf_updates[i].sdr_white_level_nits && 4908 srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) || 4909 srf_updates[i].in_transfer_func || 4910 srf_updates[i].func_shaper || 4911 srf_updates[i].lut3d_func || 4912 srf_updates[i].surface->force_full_update || 4913 (srf_updates[i].flip_addr && 4914 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || 4915 (srf_updates[i].cm2_params && 4916 (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting || 4917 srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) || 4918 !is_surface_in_context(context, srf_updates[i].surface))) 4919 return true; 4920 } 4921 4922 if (stream_update && 4923 (((stream_update->src.height != 0 && stream_update->src.width != 0) || 4924 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 4925 stream_update->integer_scaling_update) || 4926 stream_update->hdr_static_metadata || 4927 stream_update->abm_level || 4928 stream_update->periodic_interrupt || 4929 stream_update->vrr_infopacket || 4930 stream_update->vsc_infopacket || 4931 stream_update->vsp_infopacket || 4932 stream_update->hfvsif_infopacket || 4933 stream_update->vtem_infopacket || 4934 stream_update->adaptive_sync_infopacket || 4935 stream_update->dpms_off || 4936 stream_update->allow_freesync || 4937 stream_update->vrr_active_variable || 4938 stream_update->vrr_active_fixed || 4939 stream_update->gamut_remap || 4940 stream_update->output_color_space || 4941 stream_update->dither_option || 4942 stream_update->wb_update || 4943 stream_update->dsc_config || 4944 stream_update->mst_bw_update || 4945 stream_update->func_shaper || 4946 stream_update->lut3d_func || 4947 stream_update->pending_test_pattern || 4948 stream_update->crtc_timing_adjust || 4949 stream_update->scaler_sharpener_update || 4950 stream_update->hw_cursor_req)) 4951 return true; 4952 4953 if (stream) { 4954 stream_status = dc_stream_get_status(stream); 4955 if (stream_status == NULL || stream_status->plane_count != surface_count) 4956 return true; 4957 } 4958 if (dc->idle_optimizations_allowed) 4959 return true; 4960 4961 return false; 4962 } 4963 4964 static bool fast_update_only(struct dc *dc, 4965 struct dc_fast_update *fast_update, 4966 struct dc_surface_update *srf_updates, 4967 int surface_count, 4968 struct dc_stream_update *stream_update, 4969 struct dc_stream_state *stream) 4970 { 4971 return fast_updates_exist(fast_update, surface_count) 4972 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); 4973 } 4974 4975 static bool update_planes_and_stream_v1(struct dc *dc, 4976 struct dc_surface_update *srf_updates, int surface_count, 4977 struct dc_stream_state *stream, 4978 struct dc_stream_update *stream_update, 4979 struct dc_state *state) 4980 { 4981 const struct dc_stream_status *stream_status; 4982 enum surface_update_type update_type; 4983 struct dc_state *context; 4984 struct dc_context *dc_ctx 
= dc->ctx; 4985 int i, j; 4986 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 4987 4988 dc_exit_ips_for_hw_access(dc); 4989 4990 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 4991 stream_status = dc_stream_get_status(stream); 4992 context = dc->current_state; 4993 4994 update_type = dc_check_update_surfaces_for_stream( 4995 dc, srf_updates, surface_count, stream_update, stream_status); 4996 /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. 4997 * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip 4998 * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. 4999 */ 5000 force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); 5001 5002 if (update_type >= UPDATE_TYPE_FULL) { 5003 5004 /* initialize scratch memory for building context */ 5005 context = dc_state_create_copy(state); 5006 if (context == NULL) { 5007 DC_ERROR("Failed to allocate new validate context!\n"); 5008 return false; 5009 } 5010 5011 for (i = 0; i < dc->res_pool->pipe_count; i++) { 5012 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 5013 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 5014 5015 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) 5016 new_pipe->plane_state->force_full_update = true; 5017 } 5018 } else if (update_type == UPDATE_TYPE_FAST) { 5019 /* 5020 * Previous frame finished and HW is ready for optimization. 5021 */ 5022 dc_post_update_surfaces_to_stream(dc); 5023 } 5024 5025 for (i = 0; i < surface_count; i++) { 5026 struct dc_plane_state *surface = srf_updates[i].surface; 5027 5028 copy_surface_update_to_plane(surface, &srf_updates[i]); 5029 5030 if (update_type >= UPDATE_TYPE_MED) { 5031 for (j = 0; j < dc->res_pool->pipe_count; j++) { 5032 struct pipe_ctx *pipe_ctx = 5033 &context->res_ctx.pipe_ctx[j]; 5034 5035 if (pipe_ctx->plane_state != surface) 5036 continue; 5037 5038 resource_build_scaling_params(pipe_ctx); 5039 } 5040 } 5041 } 5042 5043 copy_stream_update_to_stream(dc, context, stream, stream_update); 5044 5045 if (update_type >= UPDATE_TYPE_FULL) { 5046 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 5047 DC_ERROR("Mode validation failed for stream update!\n"); 5048 dc_state_release(context); 5049 return false; 5050 } 5051 } 5052 5053 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); 5054 5055 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && 5056 !dc->debug.enable_legacy_fast_update) { 5057 commit_planes_for_stream_fast(dc, 5058 srf_updates, 5059 surface_count, 5060 stream, 5061 stream_update, 5062 update_type, 5063 context); 5064 } else { 5065 commit_planes_for_stream( 5066 dc, 5067 srf_updates, 5068 surface_count, 5069 stream, 5070 stream_update, 5071 update_type, 5072 context); 5073 } 5074 /*update current_State*/ 5075 if (dc->current_state != context) { 5076 5077 struct dc_state *old = dc->current_state; 5078 5079 dc->current_state = context; 5080 dc_state_release(old); 5081 5082 for (i = 0; i < dc->res_pool->pipe_count; i++) { 5083 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 5084 5085 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 5086 pipe_ctx->plane_state->force_full_update = false; 5087 } 5088 } 5089 5090 /* Legacy optimization path for DCE. 
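 * On DCE (pre-DCN) ASICs, i.e. dce_version < DCE_VERSION_MAX, the post-update
 * bandwidth/clock optimization is applied immediately after a full update via
 * dc_post_update_surfaces_to_stream() rather than being deferred to a later
 * optimization pass.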
*/ 5091 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { 5092 dc_post_update_surfaces_to_stream(dc); 5093 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 5094 } 5095 return true; 5096 } 5097 5098 static bool update_planes_and_stream_v2(struct dc *dc, 5099 struct dc_surface_update *srf_updates, int surface_count, 5100 struct dc_stream_state *stream, 5101 struct dc_stream_update *stream_update) 5102 { 5103 struct dc_state *context; 5104 enum surface_update_type update_type; 5105 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 5106 5107 /* In cases where MPO and split or ODM are used transitions can 5108 * cause underflow. Apply stream configuration with minimal pipe 5109 * split first to avoid unsupported transitions for active pipes. 5110 */ 5111 bool force_minimal_pipe_splitting = 0; 5112 bool is_plane_addition = 0; 5113 bool is_fast_update_only; 5114 5115 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 5116 is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, 5117 surface_count, stream_update, stream); 5118 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( 5119 dc, 5120 stream, 5121 srf_updates, 5122 surface_count, 5123 &is_plane_addition); 5124 5125 /* on plane addition, minimal state is the current one */ 5126 if (force_minimal_pipe_splitting && is_plane_addition && 5127 !commit_minimal_transition_state(dc, dc->current_state)) 5128 return false; 5129 5130 if (!update_planes_and_stream_state( 5131 dc, 5132 srf_updates, 5133 surface_count, 5134 stream, 5135 stream_update, 5136 &update_type, 5137 &context)) 5138 return false; 5139 5140 /* on plane removal, minimal state is the new one */ 5141 if (force_minimal_pipe_splitting && !is_plane_addition) { 5142 if (!commit_minimal_transition_state(dc, context)) { 5143 dc_state_release(context); 5144 return false; 5145 } 5146 update_type = UPDATE_TYPE_FULL; 5147 } 5148 5149 if (dc->hwss.is_pipe_topology_transition_seamless && 5150 !dc->hwss.is_pipe_topology_transition_seamless( 5151 dc, dc->current_state, context)) 5152 commit_minimal_transition_state_in_dc_update(dc, context, stream, 5153 srf_updates, surface_count); 5154 5155 if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { 5156 commit_planes_for_stream_fast(dc, 5157 srf_updates, 5158 surface_count, 5159 stream, 5160 stream_update, 5161 update_type, 5162 context); 5163 } else { 5164 if (!stream_update && 5165 dc->hwss.is_pipe_topology_transition_seamless && 5166 !dc->hwss.is_pipe_topology_transition_seamless( 5167 dc, dc->current_state, context)) { 5168 DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); 5169 BREAK_TO_DEBUGGER(); 5170 } 5171 commit_planes_for_stream( 5172 dc, 5173 srf_updates, 5174 surface_count, 5175 stream, 5176 stream_update, 5177 update_type, 5178 context); 5179 } 5180 if (dc->current_state != context) 5181 swap_and_release_current_context(dc, context, stream); 5182 return true; 5183 } 5184 5185 static void commit_planes_and_stream_update_on_current_context(struct dc *dc, 5186 struct dc_surface_update *srf_updates, int surface_count, 5187 struct dc_stream_state *stream, 5188 struct dc_stream_update *stream_update, 5189 enum surface_update_type update_type) 5190 { 5191 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 5192 5193 ASSERT(update_type < UPDATE_TYPE_FULL); 5194 populate_fast_updates(fast_update, srf_updates, surface_count, 5195 stream_update); 5196 if (fast_update_only(dc, fast_update, 
srf_updates, surface_count, 5197 stream_update, stream) && 5198 !dc->debug.enable_legacy_fast_update) 5199 commit_planes_for_stream_fast(dc, 5200 srf_updates, 5201 surface_count, 5202 stream, 5203 stream_update, 5204 update_type, 5205 dc->current_state); 5206 else 5207 commit_planes_for_stream( 5208 dc, 5209 srf_updates, 5210 surface_count, 5211 stream, 5212 stream_update, 5213 update_type, 5214 dc->current_state); 5215 } 5216 5217 static void commit_planes_and_stream_update_with_new_context(struct dc *dc, 5218 struct dc_surface_update *srf_updates, int surface_count, 5219 struct dc_stream_state *stream, 5220 struct dc_stream_update *stream_update, 5221 enum surface_update_type update_type, 5222 struct dc_state *new_context) 5223 { 5224 ASSERT(update_type >= UPDATE_TYPE_FULL); 5225 if (!dc->hwss.is_pipe_topology_transition_seamless(dc, 5226 dc->current_state, new_context)) 5227 /* 5228 * It is required by the feature design that all pipe topologies 5229 * using extra free pipes for power saving purposes such as 5230 * dynamic ODM or SubVp shall only be enabled when they can be 5231 * transitioned seamlessly to AND from their minimal transition 5232 * state. A minimal transition state is defined as the same dc 5233 * state but with all power saving features disabled. So it uses 5234 * the minimum pipe topology. When we can't seamlessly 5235 * transition from state A to state B, we will insert the 5236 * minimal transition state A' or B' in between so seamless 5237 * transition between A and B can be made possible. 5238 */ 5239 commit_minimal_transition_state_in_dc_update(dc, new_context, 5240 stream, srf_updates, surface_count); 5241 5242 commit_planes_for_stream( 5243 dc, 5244 srf_updates, 5245 surface_count, 5246 stream, 5247 stream_update, 5248 update_type, 5249 new_context); 5250 } 5251 5252 static bool update_planes_and_stream_v3(struct dc *dc, 5253 struct dc_surface_update *srf_updates, int surface_count, 5254 struct dc_stream_state *stream, 5255 struct dc_stream_update *stream_update) 5256 { 5257 struct dc_state *new_context; 5258 enum surface_update_type update_type; 5259 5260 /* 5261 * When this function returns true and new_context is not equal to 5262 * current state, the function allocates and validates a new dc state 5263 * and assigns it to new_context. The function expects that the caller 5264 * is responsible for freeing this memory when new_context is no longer 5265 * used. We swap current with new context and free current instead. So 5266 * new_context's memory will live until the next full update after it is 5267 * replaced by a newer context. Refer to the use of 5268 * swap_and_release_current_context below.
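 * For fast updates new_context is simply dc->current_state and no swap
 * takes place.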
5269 */ 5270 if (!update_planes_and_stream_state(dc, srf_updates, surface_count, 5271 stream, stream_update, &update_type, 5272 &new_context)) 5273 return false; 5274 5275 if (new_context == dc->current_state) { 5276 commit_planes_and_stream_update_on_current_context(dc, 5277 srf_updates, surface_count, stream, 5278 stream_update, update_type); 5279 } else { 5280 commit_planes_and_stream_update_with_new_context(dc, 5281 srf_updates, surface_count, stream, 5282 stream_update, update_type, new_context); 5283 swap_and_release_current_context(dc, new_context, stream); 5284 } 5285 5286 return true; 5287 } 5288 5289 static void clear_update_flags(struct dc_surface_update *srf_updates, 5290 int surface_count, struct dc_stream_state *stream) 5291 { 5292 int i; 5293 5294 if (stream) 5295 stream->update_flags.raw = 0; 5296 5297 for (i = 0; i < surface_count; i++) 5298 if (srf_updates[i].surface) 5299 srf_updates[i].surface->update_flags.raw = 0; 5300 } 5301 5302 bool dc_update_planes_and_stream(struct dc *dc, 5303 struct dc_surface_update *srf_updates, int surface_count, 5304 struct dc_stream_state *stream, 5305 struct dc_stream_update *stream_update) 5306 { 5307 bool ret = false; 5308 5309 dc_exit_ips_for_hw_access(dc); 5310 /* 5311 * Update planes and stream version 3 separates FULL and FAST updates 5312 * into their own sequences. It aims to clean up frequent checks for 5313 * update type that result in unnecessary branching in the logic flow. It also 5314 * adds a new commit minimal transition sequence, which detects the need 5315 * for minimal transition based on the actual comparison of current and 5316 * new states instead of "predicting" it based on per-feature software 5317 * policy, i.e. could_mpcc_tree_change_for_active_pipes(). The new commit 5318 * minimal transition sequence is made universal to any power saving 5319 * features that would use extra free pipes such as Dynamic ODM/MPC 5320 * Combine, MPO or SubVp. Therefore there is no longer a need to 5321 * specially handle compatibility problems with transitions among those 5322 * features, as they are now transparent to the new sequence. 5323 */ 5324 if (dc->ctx->dce_version >= DCN_VERSION_4_01) 5325 ret = update_planes_and_stream_v3(dc, srf_updates, 5326 surface_count, stream, stream_update); 5327 else 5328 ret = update_planes_and_stream_v2(dc, srf_updates, 5329 surface_count, stream, stream_update); 5330 5331 if (ret) 5332 clear_update_flags(srf_updates, surface_count, stream); 5333 5334 return ret; 5335 } 5336 5337 void dc_commit_updates_for_stream(struct dc *dc, 5338 struct dc_surface_update *srf_updates, 5339 int surface_count, 5340 struct dc_stream_state *stream, 5341 struct dc_stream_update *stream_update, 5342 struct dc_state *state) 5343 { 5344 bool ret = false; 5345 5346 dc_exit_ips_for_hw_access(dc); 5347 /* TODO: Since changing the commit sequence can have a huge impact, 5348 * we decided to only enable it for DCN3x. However, as soon as 5349 * we get more confident about this change, we'll need to enable 5350 * the new sequence for all ASICs.
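 * Today this means v3 for DCN4.01 and newer, v2 for DCN3.2 and newer, and
 * the legacy v1 path for everything older.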
5351 */ 5352 if (dc->ctx->dce_version >= DCN_VERSION_4_01) { 5353 ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, 5354 stream, stream_update); 5355 } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) { 5356 ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, 5357 stream, stream_update); 5358 } else 5359 ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, 5360 stream_update, state); 5361 5362 if (ret) 5363 clear_update_flags(srf_updates, surface_count, stream); 5364 } 5365 5366 uint8_t dc_get_current_stream_count(struct dc *dc) 5367 { 5368 return dc->current_state->stream_count; 5369 } 5370 5371 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) 5372 { 5373 if (i < dc->current_state->stream_count) 5374 return dc->current_state->streams[i]; 5375 return NULL; 5376 } 5377 5378 enum dc_irq_source dc_interrupt_to_irq_source( 5379 struct dc *dc, 5380 uint32_t src_id, 5381 uint32_t ext_id) 5382 { 5383 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 5384 } 5385 5386 /* 5387 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source 5388 */ 5389 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 5390 { 5391 5392 if (dc == NULL) 5393 return false; 5394 5395 return dal_irq_service_set(dc->res_pool->irqs, src, enable); 5396 } 5397 5398 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 5399 { 5400 dal_irq_service_ack(dc->res_pool->irqs, src); 5401 } 5402 5403 void dc_power_down_on_boot(struct dc *dc) 5404 { 5405 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 5406 dc->hwss.power_down_on_boot) { 5407 if (dc->caps.ips_support) 5408 dc_exit_ips_for_hw_access(dc); 5409 dc->hwss.power_down_on_boot(dc); 5410 } 5411 } 5412 5413 void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) 5414 { 5415 if (!dc->current_state) 5416 return; 5417 5418 switch (power_state) { 5419 case DC_ACPI_CM_POWER_STATE_D0: 5420 dc_state_construct(dc, dc->current_state); 5421 5422 dc_exit_ips_for_hw_access(dc); 5423 5424 dc_z10_restore(dc); 5425 5426 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); 5427 5428 dc->hwss.init_hw(dc); 5429 5430 if (dc->hwss.init_sys_ctx != NULL && 5431 dc->vm_pa_config.valid) { 5432 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); 5433 } 5434 break; 5435 default: 5436 ASSERT(dc->current_state->stream_count == 0); 5437 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); 5438 5439 dc_state_destruct(dc->current_state); 5440 5441 break; 5442 } 5443 } 5444 5445 void dc_resume(struct dc *dc) 5446 { 5447 uint32_t i; 5448 5449 for (i = 0; i < dc->link_count; i++) 5450 dc->link_srv->resume(dc->links[i]); 5451 } 5452 5453 bool dc_is_dmcu_initialized(struct dc *dc) 5454 { 5455 struct dmcu *dmcu = dc->res_pool->dmcu; 5456 5457 if (dmcu) 5458 return dmcu->funcs->is_dmcu_initialized(dmcu); 5459 return false; 5460 } 5461 5462 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) 5463 { 5464 if (dc->hwss.set_clock) 5465 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); 5466 return DC_ERROR_UNEXPECTED; 5467 } 5468 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) 5469 { 5470 if (dc->hwss.get_clock) 5471 dc->hwss.get_clock(dc, clock_type, clock_cfg); 5472 } 5473 5474 /* enable/disable eDP PSR without specify stream for eDP */ 5475 bool dc_set_psr_allow_active(struct dc *dc, bool enable) 5476 { 
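	/* Walk every stream in the current state and request PSR active/inactive
	 * on its link; links without PSR enabled are skipped and the first link
	 * that rejects the request aborts the loop.
	 */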
5477 int i; 5478 bool allow_active; 5479 5480 for (i = 0; i < dc->current_state->stream_count ; i++) { 5481 struct dc_link *link; 5482 struct dc_stream_state *stream = dc->current_state->streams[i]; 5483 5484 link = stream->link; 5485 if (!link) 5486 continue; 5487 5488 if (link->psr_settings.psr_feature_enabled) { 5489 if (enable && !link->psr_settings.psr_allow_active) { 5490 allow_active = true; 5491 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) 5492 return false; 5493 } else if (!enable && link->psr_settings.psr_allow_active) { 5494 allow_active = false; 5495 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) 5496 return false; 5497 } 5498 } 5499 } 5500 5501 return true; 5502 } 5503 5504 /* enable/disable eDP Replay without specify stream for eDP */ 5505 bool dc_set_replay_allow_active(struct dc *dc, bool active) 5506 { 5507 int i; 5508 bool allow_active; 5509 5510 for (i = 0; i < dc->current_state->stream_count; i++) { 5511 struct dc_link *link; 5512 struct dc_stream_state *stream = dc->current_state->streams[i]; 5513 5514 link = stream->link; 5515 if (!link) 5516 continue; 5517 5518 if (link->replay_settings.replay_feature_enabled) { 5519 if (active && !link->replay_settings.replay_allow_active) { 5520 allow_active = true; 5521 if (!dc_link_set_replay_allow_active(link, &allow_active, 5522 false, false, NULL)) 5523 return false; 5524 } else if (!active && link->replay_settings.replay_allow_active) { 5525 allow_active = false; 5526 if (!dc_link_set_replay_allow_active(link, &allow_active, 5527 true, false, NULL)) 5528 return false; 5529 } 5530 } 5531 } 5532 5533 return true; 5534 } 5535 5536 /* set IPS disable state */ 5537 bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips) 5538 { 5539 dc_exit_ips_for_hw_access(dc); 5540 5541 dc->config.disable_ips = disable_ips; 5542 5543 return true; 5544 } 5545 5546 void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) 5547 { 5548 int idle_fclk_khz = 0, idle_dramclk_khz = 0, i = 0; 5549 enum mall_stream_type subvp_pipe_type[MAX_PIPES] = {0}; 5550 struct pipe_ctx *pipe = NULL; 5551 struct dc_state *context = dc->current_state; 5552 5553 if (dc->debug.disable_idle_power_optimizations) { 5554 DC_LOG_DEBUG("%s: disabled\n", __func__); 5555 return; 5556 } 5557 5558 if (allow != dc->idle_optimizations_allowed) 5559 DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, 5560 dc->idle_optimizations_allowed, allow, caller_name); 5561 5562 if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) 5563 return; 5564 5565 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) 5566 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) 5567 return; 5568 5569 if (allow == dc->idle_optimizations_allowed) 5570 return; 5571 5572 if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL && 5573 dc->hwss.apply_idle_power_optimizations(dc, allow)) { 5574 dc->idle_optimizations_allowed = allow; 5575 DC_LOG_DEBUG("%s: %s\n", __func__, allow ? 
"enabled" : "disabled"); 5576 } 5577 5578 // log idle clocks and sub vp pipe types at idle optimization time 5579 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_fclk) 5580 idle_fclk_khz = dc->clk_mgr->funcs->get_hard_min_fclk(dc->clk_mgr); 5581 5582 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk) 5583 idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr); 5584 5585 if (dc->res_pool && context) { 5586 for (i = 0; i < dc->res_pool->pipe_count; i++) { 5587 pipe = &context->res_ctx.pipe_ctx[i]; 5588 subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe); 5589 } 5590 } 5591 5592 DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n", 5593 __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2], 5594 subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name); 5595 5596 } 5597 5598 void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) 5599 { 5600 if (dc->caps.ips_support) 5601 dc_allow_idle_optimizations_internal(dc, false, caller_name); 5602 } 5603 5604 bool dc_dmub_is_ips_idle_state(struct dc *dc) 5605 { 5606 if (dc->debug.disable_idle_power_optimizations) 5607 return false; 5608 5609 if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) 5610 return false; 5611 5612 if (!dc->ctx->dmub_srv) 5613 return false; 5614 5615 return dc->ctx->dmub_srv->idle_allowed; 5616 } 5617 5618 /* set min and max memory clock to lowest and highest DPM level, respectively */ 5619 void dc_unlock_memory_clock_frequency(struct dc *dc) 5620 { 5621 if (dc->clk_mgr->funcs->set_hard_min_memclk) 5622 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false); 5623 5624 if (dc->clk_mgr->funcs->set_hard_max_memclk) 5625 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 5626 } 5627 5628 /* set min memory clock to the min required for current mode, max to maxDPM */ 5629 void dc_lock_memory_clock_frequency(struct dc *dc) 5630 { 5631 if (dc->clk_mgr->funcs->get_memclk_states_from_smu) 5632 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); 5633 5634 if (dc->clk_mgr->funcs->set_hard_min_memclk) 5635 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true); 5636 5637 if (dc->clk_mgr->funcs->set_hard_max_memclk) 5638 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 5639 } 5640 5641 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) 5642 { 5643 struct dc_state *context = dc->current_state; 5644 struct hubp *hubp; 5645 struct pipe_ctx *pipe; 5646 int i; 5647 5648 for (i = 0; i < dc->res_pool->pipe_count; i++) { 5649 pipe = &context->res_ctx.pipe_ctx[i]; 5650 5651 if (pipe->stream != NULL) { 5652 dc->hwss.disable_pixel_data(dc, pipe, true); 5653 5654 // wait for double buffer 5655 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 5656 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); 5657 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 5658 5659 hubp = pipe->plane_res.hubp; 5660 hubp->funcs->set_blank_regs(hubp, true); 5661 } 5662 } 5663 if (dc->clk_mgr->funcs->set_max_memclk) 5664 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); 5665 if (dc->clk_mgr->funcs->set_min_memclk) 5666 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); 5667 5668 for (i = 0; i < dc->res_pool->pipe_count; i++) 
{ 5669 pipe = &context->res_ctx.pipe_ctx[i]; 5670 5671 if (pipe->stream != NULL) { 5672 dc->hwss.disable_pixel_data(dc, pipe, false); 5673 5674 hubp = pipe->plane_res.hubp; 5675 hubp->funcs->set_blank_regs(hubp, false); 5676 } 5677 } 5678 } 5679 5680 5681 /** 5682 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode 5683 * @dc: pointer to dc of the dm calling this 5684 * @enable: True = transition to DC mode, false = transition back to AC mode 5685 * 5686 * Some SoCs define additional clock limits when in DC mode, DM should 5687 * invoke this function when the platform undergoes a power source transition 5688 * so DC can apply/unapply the limit. This interface may be disruptive to 5689 * the onscreen content. 5690 * 5691 * Context: Triggered by OS through DM interface, or manually by escape calls. 5692 * Need to hold a dclock when doing so. 5693 * 5694 * Return: none (void function) 5695 * 5696 */ 5697 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) 5698 { 5699 unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i; 5700 bool p_state_change_support; 5701 5702 if (!dc->config.dc_mode_clk_limit_support) 5703 return; 5704 5705 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; 5706 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) { 5707 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM) 5708 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; 5709 } 5710 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; 5711 p_state_change_support = dc->clk_mgr->clks.p_state_change_support; 5712 5713 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { 5714 if (p_state_change_support) { 5715 if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk) 5716 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); 5717 // else: No-Op 5718 } else { 5719 if (funcMin <= softMax) 5720 blank_and_force_memclk(dc, true, softMax); 5721 // else: No-Op 5722 } 5723 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { 5724 if (p_state_change_support) { 5725 if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk) 5726 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); 5727 // else: No-Op 5728 } else { 5729 if (funcMin <= softMax) 5730 blank_and_force_memclk(dc, true, maxDPM); 5731 // else: No-Op 5732 } 5733 } 5734 dc->clk_mgr->dc_mode_softmax_enabled = enable; 5735 } 5736 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, 5737 unsigned int pitch, 5738 unsigned int height, 5739 enum surface_pixel_format format, 5740 struct dc_cursor_attributes *cursor_attr) 5741 { 5742 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr)) 5743 return true; 5744 return false; 5745 } 5746 5747 /* cleanup on driver unload */ 5748 void dc_hardware_release(struct dc *dc) 5749 { 5750 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc); 5751 5752 if (dc->hwss.hardware_release) 5753 dc->hwss.hardware_release(dc); 5754 } 5755 5756 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) 5757 { 5758 if (dc->current_state) 5759 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true; 5760 } 5761 5762 /** 5763 * dc_is_dmub_outbox_supported - Check if DMUB firmware support outbox notification 5764 * 5765 * @dc: [in] dc structure 5766 * 5767 * Checks whether DMUB FW supports outbox notifications, if supported DM 5768 * should register outbox interrupt prior to actually enabling interrupts 5769 * via dc_enable_dmub_outbox 5770 * 
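 * Support also depends on the ASIC family and on DPIA (or DMUB AUX for
 * legacy DDC) being enabled, as checked below.
 *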
5771 * Return: 5772 * True if DMUB FW supports outbox notifications, False otherwise 5773 */ 5774 bool dc_is_dmub_outbox_supported(struct dc *dc) 5775 { 5776 if (!dc->caps.dmcub_support) 5777 return false; 5778 5779 switch (dc->ctx->asic_id.chip_family) { 5780 5781 case FAMILY_YELLOW_CARP: 5782 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 5783 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 5784 !dc->debug.dpia_debug.bits.disable_dpia) 5785 return true; 5786 break; 5787 5788 case AMDGPU_FAMILY_GC_11_0_1: 5789 case AMDGPU_FAMILY_GC_11_5_0: 5790 if (!dc->debug.dpia_debug.bits.disable_dpia) 5791 return true; 5792 break; 5793 5794 default: 5795 break; 5796 } 5797 5798 /* dmub aux needs dmub notifications to be enabled */ 5799 return dc->debug.enable_dmub_aux_for_legacy_ddc; 5800 5801 } 5802 5803 /** 5804 * dc_enable_dmub_notifications - Check if dmub fw supports outbox 5805 * 5806 * @dc: [in] dc structure 5807 * 5808 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox 5809 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This 5810 * API shall be removed after switching. 5811 * 5812 * Return: 5813 * True if DMUB FW supports outbox notifications, False otherwise 5814 */ 5815 bool dc_enable_dmub_notifications(struct dc *dc) 5816 { 5817 return dc_is_dmub_outbox_supported(dc); 5818 } 5819 5820 /** 5821 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification 5822 * 5823 * @dc: [in] dc structure 5824 * 5825 * Enables DMUB unsolicited notifications to x86 via outbox. 5826 */ 5827 void dc_enable_dmub_outbox(struct dc *dc) 5828 { 5829 struct dc_context *dc_ctx = dc->ctx; 5830 5831 dmub_enable_outbox_notification(dc_ctx->dmub_srv); 5832 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__); 5833 } 5834 5835 /** 5836 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message 5837 * Sets port index appropriately for legacy DDC 5838 * @dc: dc structure 5839 * @link_index: link index 5840 * @payload: aux payload 5841 * 5842 * Returns: True if successful, False if failure 5843 */ 5844 bool dc_process_dmub_aux_transfer_async(struct dc *dc, 5845 uint32_t link_index, 5846 struct aux_payload *payload) 5847 { 5848 uint8_t action; 5849 union dmub_rb_cmd cmd = {0}; 5850 5851 ASSERT(payload->length <= 16); 5852 5853 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; 5854 cmd.dp_aux_access.header.payload_bytes = 0; 5855 /* For dpia, ddc_pin is set to NULL */ 5856 if (!dc->links[link_index]->ddc->ddc_pin) 5857 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA; 5858 else 5859 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; 5860 5861 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; 5862 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; 5863 cmd.dp_aux_access.aux_control.timeout = 0; 5864 cmd.dp_aux_access.aux_control.dpaux.address = payload->address; 5865 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; 5866 cmd.dp_aux_access.aux_control.dpaux.length = payload->length; 5867 5868 /* set aux action */ 5869 if (payload->i2c_over_aux) { 5870 if (payload->write) { 5871 if (payload->mot) 5872 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; 5873 else 5874 action = DP_AUX_REQ_ACTION_I2C_WRITE; 5875 } else { 5876 if (payload->mot) 5877 action = DP_AUX_REQ_ACTION_I2C_READ_MOT; 5878 else 5879 action = DP_AUX_REQ_ACTION_I2C_READ; 5880 } 5881 } else { 5882 if (payload->write) 5883 action = DP_AUX_REQ_ACTION_DPCD_WRITE; 5884 else 5885 action = 
DP_AUX_REQ_ACTION_DPCD_READ; 5886 } 5887 5888 cmd.dp_aux_access.aux_control.dpaux.action = action; 5889 5890 if (payload->length && payload->write) { 5891 memcpy(cmd.dp_aux_access.aux_control.dpaux.data, 5892 payload->data, 5893 payload->length 5894 ); 5895 } 5896 5897 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 5898 5899 return true; 5900 } 5901 5902 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, 5903 uint8_t dpia_port_index) 5904 { 5905 uint8_t index, link_index = 0xFF; 5906 5907 for (index = 0; index < dc->link_count; index++) { 5908 /* ddc_hw_inst has dpia port index for dpia links 5909 * and ddc instance for legacy links 5910 */ 5911 if (!dc->links[index]->ddc->ddc_pin) { 5912 if (dc->links[index]->ddc_hw_inst == dpia_port_index) { 5913 link_index = index; 5914 break; 5915 } 5916 } 5917 } 5918 ASSERT(link_index != 0xFF); 5919 return link_index; 5920 } 5921 5922 /** 5923 * dc_process_dmub_set_config_async - Submits set_config command 5924 * 5925 * @dc: [in] dc structure 5926 * @link_index: [in] link_index: link index 5927 * @payload: [in] aux payload 5928 * @notify: [out] set_config immediate reply 5929 * 5930 * Submits set_config command to dmub via inbox message. 5931 * 5932 * Return: 5933 * True if successful, False if failure 5934 */ 5935 bool dc_process_dmub_set_config_async(struct dc *dc, 5936 uint32_t link_index, 5937 struct set_config_cmd_payload *payload, 5938 struct dmub_notification *notify) 5939 { 5940 union dmub_rb_cmd cmd = {0}; 5941 bool is_cmd_complete = true; 5942 5943 /* prepare SET_CONFIG command */ 5944 cmd.set_config_access.header.type = DMUB_CMD__DPIA; 5945 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; 5946 5947 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; 5948 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 5949 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; 5950 5951 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { 5952 /* command is not processed by dmub */ 5953 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; 5954 return is_cmd_complete; 5955 } 5956 5957 /* command processed by dmub, if ret_status is 1, it is completed instantly */ 5958 if (cmd.set_config_access.header.ret_status == 1) 5959 notify->sc_status = cmd.set_config_access.set_config_control.immed_status; 5960 else 5961 /* cmd pending, will receive notification via outbox */ 5962 is_cmd_complete = false; 5963 5964 return is_cmd_complete; 5965 } 5966 5967 /** 5968 * dc_process_dmub_set_mst_slots - Submits MST solt allocation 5969 * 5970 * @dc: [in] dc structure 5971 * @link_index: [in] link index 5972 * @mst_alloc_slots: [in] mst slots to be allotted 5973 * @mst_slots_in_use: [out] mst slots in use returned in failure case 5974 * 5975 * Submits mst slot allocation command to dmub via inbox message 5976 * 5977 * Return: 5978 * DC_OK if successful, DC_ERROR if failure 5979 */ 5980 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, 5981 uint32_t link_index, 5982 uint8_t mst_alloc_slots, 5983 uint8_t *mst_slots_in_use) 5984 { 5985 union dmub_rb_cmd cmd = {0}; 5986 5987 /* prepare MST_ALLOC_SLOTS command */ 5988 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; 5989 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; 5990 5991 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; 5992 
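	/* number of MST payload slots requested on this DPIA link */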
cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; 5993 5994 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) 5995 /* command is not processed by dmub */ 5996 return DC_ERROR_UNEXPECTED; 5997 5998 /* command processed by dmub, if ret_status is 1 */ 5999 if (cmd.set_config_access.header.ret_status != 1) 6000 /* command processing error */ 6001 return DC_ERROR_UNEXPECTED; 6002 6003 /* command processed and we have a status of 2, mst not enabled in dpia */ 6004 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) 6005 return DC_FAIL_UNSUPPORTED_1; 6006 6007 /* previously configured mst alloc and used slots did not match */ 6008 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { 6009 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; 6010 return DC_NOT_SUPPORTED; 6011 } 6012 6013 return DC_OK; 6014 } 6015 6016 /** 6017 * dc_process_dmub_dpia_set_tps_notification - Submits tps notification 6018 * 6019 * @dc: [in] dc structure 6020 * @link_index: [in] link index 6021 * @tps: [in] request tps 6022 * 6023 * Submits set_tps_notification command to dmub via inbox message 6024 */ 6025 void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps) 6026 { 6027 union dmub_rb_cmd cmd = {0}; 6028 6029 cmd.set_tps_notification.header.type = DMUB_CMD__DPIA; 6030 cmd.set_tps_notification.header.sub_type = DMUB_CMD__DPIA_SET_TPS_NOTIFICATION; 6031 cmd.set_tps_notification.tps_notification.instance = dc->links[link_index]->ddc_hw_inst; 6032 cmd.set_tps_notification.tps_notification.tps = tps; 6033 6034 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 6035 } 6036 6037 /** 6038 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption 6039 * 6040 * @dc: [in] dc structure 6041 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable 6042 * 6043 * Submits dpia hpd int enable command to dmub via inbox message 6044 */ 6045 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, 6046 uint32_t hpd_int_enable) 6047 { 6048 union dmub_rb_cmd cmd = {0}; 6049 6050 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; 6051 cmd.dpia_hpd_int_enable.enable = hpd_int_enable; 6052 6053 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 6054 6055 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); 6056 } 6057 6058 /** 6059 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging 6060 * 6061 * @dc: [in] dc structure 6062 * 6063 * 6064 */ 6065 void dc_print_dmub_diagnostic_data(const struct dc *dc) 6066 { 6067 dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv); 6068 } 6069 6070 /** 6071 * dc_disable_accelerated_mode - disable accelerated mode 6072 * @dc: dc structure 6073 */ 6074 void dc_disable_accelerated_mode(struct dc *dc) 6075 { 6076 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); 6077 } 6078 6079 6080 /** 6081 * dc_notify_vsync_int_state - notifies vsync enable/disable state 6082 * @dc: dc structure 6083 * @stream: stream where vsync int state changed 6084 * @enable: whether vsync is enabled or disabled 6085 * 6086 * Called when vsync is enabled/disabled Will notify DMUB to start/stop ABM 6087 * interrupts after steady state is reached. 
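 * Streams whose link has PSR or Panel Replay enabled are left untouched.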
6088 */ 6089 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) 6090 { 6091 int i; 6092 int edp_num; 6093 struct pipe_ctx *pipe = NULL; 6094 struct dc_link *link = stream->sink->link; 6095 struct dc_link *edp_links[MAX_NUM_EDP]; 6096 6097 6098 if (link->psr_settings.psr_feature_enabled) 6099 return; 6100 6101 if (link->replay_settings.replay_feature_enabled) 6102 return; 6103 6104 /*find primary pipe associated with stream*/ 6105 for (i = 0; i < MAX_PIPES; i++) { 6106 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 6107 6108 if (pipe->stream == stream && pipe->stream_res.tg) 6109 break; 6110 } 6111 6112 if (i == MAX_PIPES) { 6113 ASSERT(0); 6114 return; 6115 } 6116 6117 dc_get_edp_links(dc, edp_links, &edp_num); 6118 6119 /* Determine panel inst */ 6120 for (i = 0; i < edp_num; i++) { 6121 if (edp_links[i] == link) 6122 break; 6123 } 6124 6125 if (i == edp_num) { 6126 return; 6127 } 6128 6129 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) 6130 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); 6131 } 6132 6133 /***************************************************************************** 6134 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause 6135 * ABM 6136 * @dc: dc structure 6137 * @stream: stream where vsync int state changed 6138 * @pData: abm hw states 6139 * 6140 ****************************************************************************/ 6141 bool dc_abm_save_restore( 6142 struct dc *dc, 6143 struct dc_stream_state *stream, 6144 struct abm_save_restore *pData) 6145 { 6146 int i; 6147 int edp_num; 6148 struct pipe_ctx *pipe = NULL; 6149 struct dc_link *link = stream->sink->link; 6150 struct dc_link *edp_links[MAX_NUM_EDP]; 6151 6152 if (link->replay_settings.replay_feature_enabled) 6153 return false; 6154 6155 /*find primary pipe associated with stream*/ 6156 for (i = 0; i < MAX_PIPES; i++) { 6157 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 6158 6159 if (pipe->stream == stream && pipe->stream_res.tg) 6160 break; 6161 } 6162 6163 if (i == MAX_PIPES) { 6164 ASSERT(0); 6165 return false; 6166 } 6167 6168 dc_get_edp_links(dc, edp_links, &edp_num); 6169 6170 /* Determine panel inst */ 6171 for (i = 0; i < edp_num; i++) 6172 if (edp_links[i] == link) 6173 break; 6174 6175 if (i == edp_num) 6176 return false; 6177 6178 if (pipe->stream_res.abm && 6179 pipe->stream_res.abm->funcs->save_restore) 6180 return pipe->stream_res.abm->funcs->save_restore( 6181 pipe->stream_res.abm, 6182 i, 6183 pData); 6184 return false; 6185 } 6186 6187 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) 6188 { 6189 unsigned int i; 6190 bool subvp_sw_cursor_req = false; 6191 6192 for (i = 0; i < dc->current_state->stream_count; i++) { 6193 if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) { 6194 subvp_sw_cursor_req = true; 6195 break; 6196 } 6197 } 6198 properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size; 6199 } 6200 6201 /** 6202 * dc_set_edp_power() - DM controls eDP power to be ON/OFF 6203 * 6204 * Called when DM wants to power on/off eDP. 6205 * Only work on links with flag skip_implict_edp_power_control is set. 
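 * For links without that flag this call is a no-op (eDP panel power is then
 * controlled implicitly as part of link enable/disable).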
6206 * 6207 * @dc: Current DC state 6208 * @edp_link: a link with eDP connector signal type 6209 * @powerOn: power on/off eDP 6210 * 6211 * Return: void 6212 */ 6213 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, 6214 bool powerOn) 6215 { 6216 if (edp_link->connector_signal != SIGNAL_TYPE_EDP) 6217 return; 6218 6219 if (edp_link->skip_implict_edp_power_control == false) 6220 return; 6221 6222 edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); 6223 } 6224 6225 /* 6226 ***************************************************************************** 6227 * dc_get_power_profile_for_dc_state() - extracts power profile from dc state 6228 * 6229 * Called when DM wants to make power policy decisions based on dc_state 6230 * 6231 ***************************************************************************** 6232 */ 6233 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context) 6234 { 6235 struct dc_power_profile profile = { 0 }; 6236 6237 profile.power_level = !context->bw_ctx.bw.dcn.clk.p_state_change_support; 6238 if (!context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc) 6239 return profile; 6240 struct dc *dc = context->clk_mgr->ctx->dc; 6241 6242 if (dc->res_pool->funcs->get_power_profile) 6243 profile.power_level = dc->res_pool->funcs->get_power_profile(context); 6244 return profile; 6245 } 6246 6247 /* 6248 ********************************************************************************** 6249 * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state 6250 * 6251 * Called when DM wants to log detile buffer size from dc_state 6252 * 6253 ********************************************************************************** 6254 */ 6255 unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) 6256 { 6257 struct dc *dc = context->clk_mgr->ctx->dc; 6258 6259 if (dc->res_pool->funcs->get_det_buffer_size) 6260 return dc->res_pool->funcs->get_det_buffer_size(context); 6261 else 6262 return 0; 6263 } 6264
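/*
 * Illustrative sketch (not part of the driver): one way a DM could combine
 * the two state queries above when logging power-policy decisions. The helper
 * name dm_log_power_profile and the use of DRM_DEBUG_DRIVER here are
 * assumptions for this example only.
 *
 *	static void dm_log_power_profile(const struct dc_state *context)
 *	{
 *		struct dc_power_profile profile =
 *			dc_get_power_profile_for_dc_state(context);
 *
 *		DRM_DEBUG_DRIVER("power_level=%d det_buffer_size=%u\n",
 *				 profile.power_level,
 *				 dc_get_det_buffer_size_from_state(context));
 *	}
 */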