/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "amdgpu.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
#include "dc_plane.h"
#include "dc_plane_priv.h"
#include "dc_stream_priv.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link_service.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dml2_0/dml2_internal_types.h"
#include "soc_and_ip_translator.h"
#endif

#include "dce/dmub_outbox.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
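/* Illustrative example (not part of the DC interface): for a hypothetical
 * board with one eDP panel and one DP connector lighting up a single
 * desktop, the structs above would typically relate as follows:
 *
 *	dc
 *	  +-- links[0], links[1]            one dc_link per connector
 *	  |     +-- links[0]->local_sink    the attached eDP panel (dc_sink)
 *	  +-- current_state (dc_state)
 *	        +-- streams[0]              timing/output for that sink
 *	        |     +-- plane_states[0]   the framebuffer being scanned out
 *	        +-- res_ctx.pipe_ctx[]      HW pipes backing stream and plane
 *
 * Exact counts depend on the ASIC, the connector population reported by
 * VBIOS, and features such as MST, MPO, and pipe split.
 */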

/* Private functions */

static inline void elevate_update_type(
	struct surface_update_descriptor *descriptor,
	enum surface_update_type new_type,
	enum dc_lock_descriptor new_locks
)
{
	if (new_type > descriptor->update_type)
		descriptor->update_type = new_type;

	descriptor->lock_descriptor |= new_locks;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			dc->link_srv->destroy_link(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	/* When getting the number of connectors, the VBIOS reports the number of valid indices,
	 * but it doesn't say which indices are valid, and not every index has an actual connector.
	 * So, if we don't find a connector on an index, that is not an error.
	 *
	 * - There is no guarantee that the first N indices will be valid
	 * - VBIOS may report a higher amount of valid indices than there are actual connectors
	 * - Some VBIOS have valid configurations for more connectors than there actually are
	 *   on the card. This may be because the manufacturer used the same VBIOS for different
	 *   variants of the same card.
	 */
	for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
		struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		if (connector_id.id == CONNECTOR_ID_UNKNOWN)
			continue;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = dc->link_srv->create_link(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	dc->lowest_dpia_link_index = MAX_LINKS;
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = dc->link_srv->create_link(&link_init_params);
		if (link) {
			if (dc->lowest_dpia_link_index > dc->link_count)
				dc->lowest_dpia_link_index = dc->link_count;

			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
		link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					res = false;
				}
			}
		}
	}

	return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
{
	if (!dc || !stream || !adjust)
		return false;

	if (!dc->current_state)
		return false;

	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			if (dc->hwss.set_long_vtotal)
				dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);

			return true;
		}
	}

	return false;
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 *	   %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	/*
	 * Don't adjust DRR while there's bandwidth optimizations pending to
	 * avoid conflicting with firmware updates.
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX) {
		if (dc->optimized_required &&
				(stream->adjust.v_total_max != adjust->v_total_max ||
				 stream->adjust.v_total_min != adjust->v_total_min)) {
			stream->adjust.timing_adjust_pending = true;
			return false;
		}
	}

	dc_exit_ips_for_hw_access(dc);

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;
	stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;

	if (dc->caps.max_v_total != 0 &&
			(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
		stream->adjust.timing_adjust_pending = false;
		if (adjust->allow_otg_v_count_halt)
			return set_long_vtotal(dc, stream, adjust);
		else
			return false;
	}

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);
			stream->adjust.timing_adjust_pending = false;

			if (dc->hwss.notify_cursor_offload_drr_update)
				dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream);

			return true;
		}
	}

	return false;
}
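/* Illustrative (hypothetical) caller sketch, not part of the DC interface:
 * a DM-side variable-refresh handler would typically fill in a
 * dc_crtc_timing_adjust from the panel's supported refresh range and hand
 * it to DC, e.g.:
 *
 *	struct dc_crtc_timing_adjust adjust = stream->adjust;
 *
 *	adjust.v_total_min = v_total_for_refresh(stream, max_refresh_hz);
 *	adjust.v_total_max = v_total_for_refresh(stream, min_refresh_hz);
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		;	// optimizations pending - DM retries on a later flip
 *
 * v_total_for_refresh() stands in for whatever conversion the DM uses; a
 * larger v_total stretches the frame and therefore lowers the refresh rate.
 */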
/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [in] new refresh_rate
 *
 * Return: %true if the pipe context is found and there is an associated
 *	   timing_generator for the DC;
 *	   %false if the pipe context is not found or there is no
 *	   timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

	if (is_stop) {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
	}

	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	if (is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	else
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, uint8_t phy_id, bool is_stop)
{
	struct dmcu *dmcu;
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = phy_id;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
	if (dmub_srv)
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
	else
		return false;

	return true;
}

static void
dc_stream_forward_dmub_multiple_crc_window(struct dc_dmub_srv *dmub_srv,
		struct crc_window *window, struct otg_phy_mux *mux_mapping, bool stop)
{
	int i;
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.mul_roi_ctl.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.mul_roi_ctl.otg_id = mux_mapping->otg_output_num;

	cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;

	if (stop) {
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY;
		for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_start = window[i].rect.x;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_start = window[i].rect.y;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_end = window[i].rect.x + window[i].rect.width;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_end = window[i].rect.y + window[i].rect.height;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].enable = window[i].enable;
		}
	}

	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

bool
dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
		struct crc_window *window, uint8_t phy_id, bool stop)
{
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = phy_id;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub only. no dmcu support */
	if (dmub_srv)
		dc_stream_forward_dmub_multiple_crc_window(dmub_srv, window, &mux_mapping, stop);
	else
		return false;

	return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @crc_window: CRC window (x/y start/end) information
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *	once.
 * @idx: Capture CRC on which CRC engine instance
 * @reset: Reset CRC engine before the configuration
 *
 * By default, the entire frame is used to calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *	   %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous,
			     uint8_t idx, bool reset)
{
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	pipe = resource_get_otg_master_for_stream(
			&dc->current_state->res_ctx, stream);

	/* Stream not found */
	if (pipe == NULL)
		return false;

	dc_exit_ips_for_hw_access(dc);

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	param.crc_eng_inst = idx;
	param.reset = reset;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);

	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @idx: index of crc engine to get CRC from
 * @r_cr: CRC value for the red component.
 * @g_y: CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe = NULL;
	struct timing_generator *tg;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb);

	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
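/* Illustrative (hypothetical) capture sequence, not part of the DC
 * interface: a debugfs or test harness built on the two calls above would
 * typically do
 *
 *	// program CRC engine 0 for continuous full-frame capture
 *	if (!dc_stream_configure_crc(dc, stream, NULL, true, true, 0, true))
 *		return -EINVAL;
 *	// ...wait for at least one completed frame (e.g. a vblank event)...
 *	if (!dc_stream_get_crc(dc, stream, 0, &r_cr, &g_y, &b_cb))
 *		return -EIO;
 *
 * Passing a crc_params window instead of NULL restricts the CRC to a
 * region, and a final configure call with enable == false stops capture.
 */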
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	dc_exit_ips_for_hw_access(stream->ctx->dc);

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
			pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
				pipes->plane_res.xfm,
				pipes->plane_res.scl_data.lb_params.depth,
				&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign &&
			!dc->config.unify_link_enc_assignment)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_state_release(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);
#ifdef CONFIG_DRM_AMD_DC_FP
	dc_destroy_soc_and_ip_translator(&dc->soc_and_ip_translator);
#endif
	if (dc->link_srv)
		link_destroy_link_service(&dc->link_srv);

	if (dc->ctx) {
		if (dc->ctx->gpio_service)
			dal_gpio_service_destroy(&dc->ctx->gpio_service);

		if (dc->ctx->created_bios)
			dal_bios_parser_destroy(&dc->ctx->dc_bios);
		kfree(dc->ctx->logger);
		dc_perf_trace_destroy(&dc->ctx->perf_trace);

		kfree(dc->ctx);
		dc->ctx = NULL;
	}

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;

}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_stream_init_rmcm_3dlut(dc);

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Create logger */
	dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

	if (!dc_ctx->logger) {
		kfree(dc_ctx);
		return false;
	}

	dc_ctx->logger->dev = adev_to_drm(init_params->driver);
	dc->dml.logger = dc_ctx->logger;

	dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		kfree(dc_ctx);
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	dc->link_srv = link_create_link_service();
	if (!dc->link_srv)
		return false;

	return true;
}
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (init_params->bb_from_dmub)
		dc->dml2_options.bb_from_dmub = init_params->bb_from_dmub;
	else
		dc->dml2_options.bb_from_dmub = NULL;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx__resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
	if (dc->check_config.max_optimizable_video_width == 0)
		dc->check_config.max_optimizable_video_width = 5120;
	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
	dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version);
	if (!dc->soc_and_ip_translator)
		goto fail;
#endif

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */
	dc->current_state = dc_state_create(dc, NULL);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	return true;

fail:
	return false;
}

static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
		bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
						(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}
static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	if (dc->debug.visual_confirm & VISUAL_CONFIRM_EXPLICIT) {
		memcpy(&pipe_ctx->visual_confirm_color, &pipe_ctx->plane_state->visual_confirm_color,
				sizeof(pipe_ctx->visual_confirm_color));
		return;
	}

	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

		if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
			get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
			get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
			get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR)
			get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_DCC)
			get_dcc_visual_confirm_color(dc, pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else {
			if (dc->ctx->dce_version < DCN_VERSION_2_0)
				color_space_to_black_color(
						dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
		}
		if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
				get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
				get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
				get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_VABC)
				get_vabc_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		}
	}
}

void dc_get_visual_confirm_for_stream(
	struct dc *dc,
	struct dc_stream_state *stream_state,
	struct tg_color *color)
{
	struct dc_stream_status *stream_status = dc_stream_get_status(stream_state);
	struct pipe_ctx *pipe_ctx;
	int i;
	struct dc_plane_state *plane_state = NULL;

	if (!stream_status)
		return;

	switch (dc->debug.visual_confirm) {
	case VISUAL_CONFIRM_DISABLE:
		return;
	case VISUAL_CONFIRM_PSR:
	case VISUAL_CONFIRM_FAMS:
		pipe_ctx = dc_stream_get_pipe_ctx(stream_state);
		if (!pipe_ctx)
			return;
		dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx);
		memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color));
		return;

	default:
		/* find plane with highest layer_index */
		for (i = 0; i < stream_status->plane_count; i++) {
			if (stream_status->plane_states[i]->visible)
				plane_state = stream_status->plane_states[i];
		}
		if (!plane_state)
			return;
		/* find pipe that contains plane with highest layer index */
		for (i = 0; i < MAX_PIPES; i++) {
			struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (pipe->plane_state == plane_state) {
				memcpy(color, &pipe->visual_confirm_color, sizeof(struct tg_color));
				return;
			}
		}
	}
}
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_state_create_current_copy(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
				(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;

			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for different use.
			 */
			if (is_phantom) {
				if (tg->funcs->enable_crtc) {
					if (dc->hwseq->funcs.blank_pixel_data)
						dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
					tg->funcs->enable_crtc(tg);
				}
			}

			if (is_phantom)
				dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
			else
				dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (pipe->stream && pipe->plane_state) {
				if (!dc->debug.using_dml2)
					set_p_state_switch_method(dc, context, pipe);
				dc_update_visual_confirm_color(dc, context, pipe);
			}

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}

			if (dc->res_pool->funcs->prepare_mcache_programming)
				dc->res_pool->funcs->prepare_mcache_programming(dc, dangling_context);
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			/* We need to put the phantom OTG back into its default (disabled) state or we
			 * can get corruption when transitioning from one SubVP config to a different one.
			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
			 * will still get its double buffer update.
			 */
			if (is_phantom) {
				if (tg->funcs->disable_phantom_crtc)
					tg->funcs->disable_phantom_crtc(tg);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_state_release(current_ctx);
}

static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if timing changed, disable stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		if (stream->apply_seamless_boot_optimization)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
				stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz = 0;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						dc->link_srv->set_dpms_off(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

/* Public functions */

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		dc->caps.linear_pitch_alignment = 64;
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_connection_type(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{

	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
	dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
	dc->ctx->cp_psp = init_params->cp_psp;
}

void dc_deinit_callbacks(struct dc *dc)
{
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
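/* Illustrative (hypothetical) DM-side lifecycle, not part of DC itself: the
 * OS-dependent layer normally drives the functions above in this order,
 *
 *	dc = dc_create(&init_params);		// driver load
 *	dc_hardware_init(dc);			// bring HW to a known state
 *	dc_init_callbacks(dc, &cb_init);	// optional, e.g. for HDCP
 *	// ...commit streams and planes at runtime...
 *	dc_deinit_callbacks(dc);
 *	dc_destroy(&dc);			// driver unload
 *
 * init_params and cb_init here are filled in by the DM (amdgpu_dm on Linux).
 */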
unsynced_pipes[j] = NULL; 1668 group_size++; 1669 } else 1670 if (sync_type != VBLANK_SYNCHRONIZABLE && 1671 resource_are_streams_timing_synchronizable( 1672 unsynced_pipes[j]->stream, 1673 pipe_set[0]->stream)) { 1674 sync_type = TIMING_SYNCHRONIZABLE; 1675 pipe_set[group_size] = unsynced_pipes[j]; 1676 unsynced_pipes[j] = NULL; 1677 group_size++; 1678 } 1679 } 1680 1681 /* set first unblanked pipe as master */ 1682 for (j = 0; j < group_size; j++) { 1683 bool is_blanked; 1684 1685 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1686 is_blanked = 1687 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1688 else 1689 is_blanked = 1690 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1691 if (!is_blanked) { 1692 if (j == 0) 1693 break; 1694 1695 swap(pipe_set[0], pipe_set[j]); 1696 break; 1697 } 1698 } 1699 1700 for (k = 0; k < group_size; k++) { 1701 struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream); 1702 1703 if (!status) 1704 continue; 1705 1706 status->timing_sync_info.group_id = num_group; 1707 status->timing_sync_info.group_size = group_size; 1708 if (k == 0) 1709 status->timing_sync_info.master = true; 1710 else 1711 status->timing_sync_info.master = false; 1712 1713 } 1714 1715 /* remove any other unblanked pipes as they have already been synced */ 1716 if (dc->config.use_pipe_ctx_sync_logic) { 1717 /* check pipe's syncd to decide which pipe to be removed */ 1718 for (j = 1; j < group_size; j++) { 1719 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { 1720 group_size--; 1721 pipe_set[j] = pipe_set[group_size]; 1722 j--; 1723 } else 1724 /* link slave pipe's syncd with master pipe */ 1725 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; 1726 } 1727 } else { 1728 /* remove any other pipes by checking valid plane */ 1729 for (j = j + 1; j < group_size; j++) { 1730 bool is_blanked; 1731 1732 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1733 is_blanked = 1734 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1735 else 1736 is_blanked = 1737 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1738 if (!is_blanked) { 1739 group_size--; 1740 pipe_set[j] = pipe_set[group_size]; 1741 j--; 1742 } 1743 } 1744 } 1745 1746 if (group_size > 1) { 1747 if (sync_type == TIMING_SYNCHRONIZABLE) { 1748 dc->hwss.enable_timing_synchronization( 1749 dc, ctx, group_index, group_size, pipe_set); 1750 } else 1751 if (sync_type == VBLANK_SYNCHRONIZABLE) { 1752 dc->hwss.enable_vblanks_synchronization( 1753 dc, group_index, group_size, pipe_set); 1754 } 1755 group_index++; 1756 } 1757 num_group++; 1758 } 1759 } 1760 1761 static bool streams_changed(struct dc *dc, 1762 struct dc_stream_state *streams[], 1763 uint8_t stream_count) 1764 { 1765 uint8_t i; 1766 1767 if (stream_count != dc->current_state->stream_count) 1768 return true; 1769 1770 for (i = 0; i < dc->current_state->stream_count; i++) { 1771 if (dc->current_state->streams[i] != streams[i]) 1772 return true; 1773 if (!streams[i]->link->link_state_valid) 1774 return true; 1775 } 1776 1777 return false; 1778 } 1779 1780 bool dc_validate_boot_timing(const struct dc *dc, 1781 const struct dc_sink *sink, 1782 struct dc_crtc_timing *crtc_timing) 1783 { 1784 struct timing_generator *tg; 1785 struct stream_encoder *se = NULL; 1786 1787 struct dc_crtc_timing hw_crtc_timing = {0}; 1788 1789 struct dc_link *link = sink->link; 1790 unsigned int i, enc_inst, tg_inst = 0; 1791 
1792 /* Support seamless boot on EDP displays only */ 1793 if (sink->sink_signal != SIGNAL_TYPE_EDP) { 1794 return false; 1795 } 1796 1797 if (dc->debug.force_odm_combine) { 1798 DC_LOG_DEBUG("boot timing validation failed due to force_odm_combine\n"); 1799 return false; 1800 } 1801 1802 /* Check for enabled DIG to identify enabled display */ 1803 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) { 1804 DC_LOG_DEBUG("boot timing validation failed due to disabled DIG\n"); 1805 return false; 1806 } 1807 1808 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1809 1810 if (enc_inst == ENGINE_ID_UNKNOWN) { 1811 DC_LOG_DEBUG("boot timing validation failed due to unknown DIG engine ID\n"); 1812 return false; 1813 } 1814 1815 for (i = 0; i < dc->res_pool->stream_enc_count; i++) { 1816 if (dc->res_pool->stream_enc[i]->id == enc_inst) { 1817 1818 se = dc->res_pool->stream_enc[i]; 1819 1820 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( 1821 dc->res_pool->stream_enc[i]); 1822 break; 1823 } 1824 } 1825 1826 // tg_inst not found 1827 if (i == dc->res_pool->stream_enc_count) { 1828 DC_LOG_DEBUG("boot timing validation failed due to timing generator instance not found\n"); 1829 return false; 1830 } 1831 1832 if (tg_inst >= dc->res_pool->timing_generator_count) { 1833 DC_LOG_DEBUG("boot timing validation failed due to invalid timing generator count\n"); 1834 return false; 1835 } 1836 1837 if (tg_inst != link->link_enc->preferred_engine) { 1838 DC_LOG_DEBUG("boot timing validation failed due to non-preferred timing generator\n"); 1839 return false; 1840 } 1841 1842 tg = dc->res_pool->timing_generators[tg_inst]; 1843 1844 if (!tg->funcs->get_hw_timing) { 1845 DC_LOG_DEBUG("boot timing validation failed due to missing get_hw_timing callback\n"); 1846 return false; 1847 } 1848 1849 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) { 1850 DC_LOG_DEBUG("boot timing validation failed due to failed get_hw_timing return\n"); 1851 return false; 1852 } 1853 1854 if (crtc_timing->h_total != hw_crtc_timing.h_total) { 1855 DC_LOG_DEBUG("boot timing validation failed due to h_total mismatch\n"); 1856 return false; 1857 } 1858 1859 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) { 1860 DC_LOG_DEBUG("boot timing validation failed due to h_border_left mismatch\n"); 1861 return false; 1862 } 1863 1864 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) { 1865 DC_LOG_DEBUG("boot timing validation failed due to h_addressable mismatch\n"); 1866 return false; 1867 } 1868 1869 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) { 1870 DC_LOG_DEBUG("boot timing validation failed due to h_border_right mismatch\n"); 1871 return false; 1872 } 1873 1874 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) { 1875 DC_LOG_DEBUG("boot timing validation failed due to h_front_porch mismatch\n"); 1876 return false; 1877 } 1878 1879 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) { 1880 DC_LOG_DEBUG("boot timing validation failed due to h_sync_width mismatch\n"); 1881 return false; 1882 } 1883 1884 if (crtc_timing->v_total != hw_crtc_timing.v_total) { 1885 DC_LOG_DEBUG("boot timing validation failed due to v_total mismatch\n"); 1886 return false; 1887 } 1888 1889 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) { 1890 DC_LOG_DEBUG("boot timing validation failed due to v_border_top mismatch\n"); 1891 return false; 1892 } 1893 1894 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) { 1895 DC_LOG_DEBUG("boot 
timing validation failed due to v_addressable mismatch\n"); 1896 return false; 1897 } 1898 1899 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) { 1900 DC_LOG_DEBUG("boot timing validation failed due to v_border_bottom mismatch\n"); 1901 return false; 1902 } 1903 1904 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) { 1905 DC_LOG_DEBUG("boot timing validation failed due to v_front_porch mismatch\n"); 1906 return false; 1907 } 1908 1909 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) { 1910 DC_LOG_DEBUG("boot timing validation failed due to v_sync_width mismatch\n"); 1911 return false; 1912 } 1913 1914 /* block DSC for now, as VBIOS does not currently support DSC timings */ 1915 if (crtc_timing->flags.DSC) { 1916 DC_LOG_DEBUG("boot timing validation failed due to DSC\n"); 1917 return false; 1918 } 1919 1920 if (dc_is_dp_signal(link->connector_signal)) { 1921 unsigned int pix_clk_100hz = 0; 1922 uint32_t numOdmPipes = 1; 1923 uint32_t id_src[4] = {0}; 1924 1925 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1926 dc->res_pool->dp_clock_source, 1927 tg_inst, &pix_clk_100hz); 1928 1929 if (tg->funcs->get_optc_source) 1930 tg->funcs->get_optc_source(tg, 1931 &numOdmPipes, &id_src[0], &id_src[1]); 1932 1933 if (numOdmPipes == 2) { 1934 pix_clk_100hz *= 2; 1935 } else if (numOdmPipes == 4) { 1936 pix_clk_100hz *= 4; 1937 } else if (se && se->funcs->get_pixels_per_cycle) { 1938 uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se); 1939 1940 if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy) { 1941 DC_LOG_DEBUG("boot timing validation failed due to pixels_per_cycle\n"); 1942 return false; 1943 } 1944 1945 pix_clk_100hz *= pixels_per_cycle; 1946 } 1947 1948 // Note: In rare cases, HW pixclk may differ from crtc's pixclk 1949 // slightly due to rounding issues in 10 kHz units. 
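// If they do differ for any reason, the comparison below rejects the
// currently programmed boot timing and seamless boot is not used for
// this link.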
1950 if (crtc_timing->pix_clk_100hz != pix_clk_100hz) { 1951 DC_LOG_DEBUG("boot timing validation failed due to pix_clk_100hz mismatch\n"); 1952 return false; 1953 } 1954 1955 if (!se || !se->funcs->dp_get_pixel_format) { 1956 DC_LOG_DEBUG("boot timing validation failed due to missing dp_get_pixel_format\n"); 1957 return false; 1958 } 1959 1960 if (!se->funcs->dp_get_pixel_format( 1961 se, 1962 &hw_crtc_timing.pixel_encoding, 1963 &hw_crtc_timing.display_color_depth)) { 1964 DC_LOG_DEBUG("boot timing validation failed due to dp_get_pixel_format failure\n"); 1965 return false; 1966 } 1967 1968 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) { 1969 DC_LOG_DEBUG("boot timing validation failed due to display_color_depth mismatch\n"); 1970 return false; 1971 } 1972 1973 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) { 1974 DC_LOG_DEBUG("boot timing validation failed due to pixel_encoding mismatch\n"); 1975 return false; 1976 } 1977 } 1978 1979 1980 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { 1981 DC_LOG_DEBUG("boot timing validation failed due to VSC SDP colorimetry\n"); 1982 return false; 1983 } 1984 1985 if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { 1986 DC_LOG_DEBUG("boot timing validation failed due to DP 128b/132b\n"); 1987 return false; 1988 } 1989 1990 if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) { 1991 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); 1992 return false; 1993 } 1994 1995 return true; 1996 } 1997 1998 static inline bool should_update_pipe_for_stream( 1999 struct dc_state *context, 2000 struct pipe_ctx *pipe_ctx, 2001 struct dc_stream_state *stream) 2002 { 2003 return (pipe_ctx->stream && pipe_ctx->stream == stream); 2004 } 2005 2006 static inline bool should_update_pipe_for_plane( 2007 struct dc_state *context, 2008 struct pipe_ctx *pipe_ctx, 2009 struct dc_plane_state *plane_state) 2010 { 2011 return (pipe_ctx->plane_state == plane_state); 2012 } 2013 2014 void dc_enable_stereo( 2015 struct dc *dc, 2016 struct dc_state *context, 2017 struct dc_stream_state *streams[], 2018 uint8_t stream_count) 2019 { 2020 int i, j; 2021 struct pipe_ctx *pipe; 2022 2023 dc_exit_ips_for_hw_access(dc); 2024 2025 for (i = 0; i < MAX_PIPES; i++) { 2026 if (context != NULL) { 2027 pipe = &context->res_ctx.pipe_ctx[i]; 2028 } else { 2029 context = dc->current_state; 2030 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2031 } 2032 2033 for (j = 0; pipe && j < stream_count; j++) { 2034 if (should_update_pipe_for_stream(context, pipe, streams[j]) && 2035 dc->hwss.setup_stereo) 2036 dc->hwss.setup_stereo(pipe, dc); 2037 } 2038 } 2039 } 2040 2041 void dc_trigger_sync(struct dc *dc, struct dc_state *context) 2042 { 2043 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { 2044 dc_exit_ips_for_hw_access(dc); 2045 2046 enable_timing_multisync(dc, context); 2047 program_timing_sync(dc, context); 2048 } 2049 } 2050 2051 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) 2052 { 2053 int i; 2054 unsigned int stream_mask = 0; 2055 2056 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2057 if (context->res_ctx.pipe_ctx[i].stream) 2058 stream_mask |= 1 << i; 2059 } 2060 2061 return stream_mask; 2062 } 2063 2064 void dc_z10_restore(const struct dc *dc) 2065 { 2066 if (dc->hwss.z10_restore) 2067 dc->hwss.z10_restore(dc); 2068 } 2069 2070 void dc_z10_save_init(struct dc *dc) 2071 { 2072 if (dc->hwss.z10_save_init) 2073 
dc->hwss.z10_save_init(dc); 2074 } 2075 2076 /* Set a pipe unlock order based on the change in DET allocation and stores it in dc scratch memory 2077 * Prevents over allocation of DET during unlock process 2078 * e.g. 2 pipe config with different streams with a max of 20 DET segments 2079 * Before: After: 2080 * - Pipe0: 10 DET segments - Pipe0: 12 DET segments 2081 * - Pipe1: 10 DET segments - Pipe1: 8 DET segments 2082 * If Pipe0 gets updated first, 22 DET segments will be allocated 2083 */ 2084 static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context) 2085 { 2086 unsigned int i = 0; 2087 struct pipe_ctx *pipe = NULL; 2088 struct timing_generator *tg = NULL; 2089 2090 if (!dc->config.set_pipe_unlock_order) 2091 return; 2092 2093 memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first)); 2094 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2095 pipe = &context->res_ctx.pipe_ctx[i]; 2096 tg = pipe->stream_res.tg; 2097 2098 if (!resource_is_pipe_type(pipe, OTG_MASTER) || 2099 !tg->funcs->is_tg_enabled(tg) || 2100 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 2101 continue; 2102 } 2103 2104 if (resource_calculate_det_for_stream(context, pipe) < 2105 resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) { 2106 dc->scratch.pipes_to_unlock_first[i] = true; 2107 } 2108 } 2109 } 2110 2111 /** 2112 * dc_commit_state_no_check - Apply context to the hardware 2113 * 2114 * @dc: DC object with the current status to be updated 2115 * @context: New state that will become the current status at the end of this function 2116 * 2117 * Applies given context to the hardware and copy it into current context. 2118 * It's up to the user to release the src context afterwards. 2119 * 2120 * Return: an enum dc_status result code for the operation 2121 */ 2122 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) 2123 { 2124 struct dc_bios *dcb = dc->ctx->dc_bios; 2125 enum dc_status result = DC_ERROR_UNEXPECTED; 2126 struct pipe_ctx *pipe; 2127 int i, k, l; 2128 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; 2129 struct dc_state *old_state; 2130 bool subvp_prev_use = false; 2131 2132 dc_z10_restore(dc); 2133 dc_allow_idle_optimizations(dc, false); 2134 2135 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2136 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2137 2138 /* Check old context for SubVP */ 2139 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 2140 if (subvp_prev_use) 2141 break; 2142 } 2143 2144 for (i = 0; i < context->stream_count; i++) 2145 dc_streams[i] = context->streams[i]; 2146 2147 if (!dcb->funcs->is_accelerated_mode(dcb)) { 2148 disable_vbios_mode_if_required(dc, context); 2149 dc->hwss.enable_accelerated_mode(dc, context); 2150 } else if (get_seamless_boot_stream_count(dc->current_state) > 0) { 2151 /* If the previous Stream still retains the apply seamless boot flag, 2152 * it means the OS has not actually performed a flip yet. 
2153 * At this point, if we receive dc_commit_streams again, we should 2154 * once more check whether the actual HW timing matches what the OS 2155 * has provided 2156 */ 2157 disable_vbios_mode_if_required(dc, context); 2158 } 2159 2160 if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) { 2161 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2162 pipe = &context->res_ctx.pipe_ctx[i]; 2163 //Only delay otg master for a given config 2164 if (resource_is_pipe_type(pipe, OTG_MASTER)) { 2165 //dc_commit_state_no_check is always a full update 2166 dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false); 2167 break; 2168 } 2169 } 2170 } 2171 2172 if (context->stream_count > get_seamless_boot_stream_count(context) || 2173 context->stream_count == 0) 2174 dc->hwss.prepare_bandwidth(dc, context); 2175 2176 /* When SubVP is active, all HW programming must be done while 2177 * SubVP lock is acquired 2178 */ 2179 if (dc->hwss.subvp_pipe_control_lock) 2180 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); 2181 if (dc->hwss.dmub_hw_control_lock) 2182 dc->hwss.dmub_hw_control_lock(dc, context, true); 2183 2184 if (dc->hwss.update_dsc_pg) 2185 dc->hwss.update_dsc_pg(dc, context, false); 2186 2187 disable_dangling_plane(dc, context); 2188 /* re-program planes for existing stream, in case we need to 2189 * free up plane resource for later use 2190 */ 2191 if (dc->hwss.apply_ctx_for_surface) { 2192 for (i = 0; i < context->stream_count; i++) { 2193 if (context->streams[i]->mode_changed) 2194 continue; 2195 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 2196 dc->hwss.apply_ctx_for_surface( 2197 dc, context->streams[i], 2198 context->stream_status[i].plane_count, 2199 context); /* use new pipe config in new context */ 2200 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 2201 dc->hwss.post_unlock_program_front_end(dc, context); 2202 } 2203 } 2204 2205 /* Program hardware */ 2206 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2207 pipe = &context->res_ctx.pipe_ctx[i]; 2208 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); 2209 } 2210 2211 for (i = 0; i < dc->current_state->stream_count; i++) 2212 dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false); 2213 2214 result = dc->hwss.apply_ctx_to_hw(dc, context); 2215 2216 for (i = 0; i < context->stream_count; i++) 2217 dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true); 2218 2219 if (result != DC_OK) { 2220 /* Application of dc_state to hardware stopped. 
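 * Link encoder assignments on the still-current state are put back into
 * steady mode before the error is returned to the caller.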
*/ 2221 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; 2222 return result; 2223 } 2224 2225 dc_trigger_sync(dc, context); 2226 2227 /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ 2228 for (i = 0; i < context->stream_count; i++) { 2229 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed; 2230 2231 context->streams[i]->update_flags.raw = 0xFFFFFFFF; 2232 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; 2233 } 2234 2235 determine_pipe_unlock_order(dc, context); 2236 /* Program all planes within new context*/ 2237 if (dc->res_pool->funcs->prepare_mcache_programming) 2238 dc->res_pool->funcs->prepare_mcache_programming(dc, context); 2239 if (dc->hwss.program_front_end_for_ctx) { 2240 dc->hwss.interdependent_update_lock(dc, context, true); 2241 dc->hwss.program_front_end_for_ctx(dc, context); 2242 2243 if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) { 2244 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2245 pipe = &context->res_ctx.pipe_ctx[i]; 2246 dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe); 2247 } 2248 } 2249 2250 dc->hwss.interdependent_update_lock(dc, context, false); 2251 dc->hwss.post_unlock_program_front_end(dc, context); 2252 } 2253 2254 if (dc->hwss.commit_subvp_config) 2255 dc->hwss.commit_subvp_config(dc, context); 2256 if (dc->hwss.subvp_pipe_control_lock) 2257 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); 2258 if (dc->hwss.dmub_hw_control_lock) 2259 dc->hwss.dmub_hw_control_lock(dc, context, false); 2260 2261 for (i = 0; i < context->stream_count; i++) { 2262 const struct dc_link *link = context->streams[i]->link; 2263 2264 if (!context->streams[i]->mode_changed) 2265 continue; 2266 2267 if (dc->hwss.apply_ctx_for_surface) { 2268 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 2269 dc->hwss.apply_ctx_for_surface( 2270 dc, context->streams[i], 2271 context->stream_status[i].plane_count, 2272 context); 2273 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 2274 dc->hwss.post_unlock_program_front_end(dc, context); 2275 } 2276 2277 /* 2278 * enable stereo 2279 * TODO rework dc_enable_stereo call to work with validation sets? 2280 */ 2281 for (k = 0; k < MAX_PIPES; k++) { 2282 pipe = &context->res_ctx.pipe_ctx[k]; 2283 2284 for (l = 0 ; pipe && l < context->stream_count; l++) { 2285 if (context->streams[l] && 2286 context->streams[l] == pipe->stream && 2287 dc->hwss.setup_stereo) 2288 dc->hwss.setup_stereo(pipe, dc); 2289 } 2290 } 2291 2292 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", 2293 context->streams[i]->timing.h_addressable, 2294 context->streams[i]->timing.v_addressable, 2295 context->streams[i]->timing.h_total, 2296 context->streams[i]->timing.v_total, 2297 context->streams[i]->timing.pix_clk_100hz / 10); 2298 } 2299 2300 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 2301 2302 if (get_seamless_boot_stream_count(context) == 0 || 2303 context->stream_count == 0) { 2304 /* Must wait for no flips to be pending before doing optimize bw */ 2305 hwss_wait_for_no_pipes_pending(dc, context); 2306 /* 2307 * optimized dispclk depends on ODM setup. Need to wait for ODM 2308 * update pending complete before optimizing bandwidth. 
2309 */ 2310 hwss_wait_for_odm_update_pending_complete(dc, context); 2311 /* pplib is notified if disp_num changed */ 2312 dc->hwss.optimize_bandwidth(dc, context); 2313 /* Need to do otg sync again as otg could be out of sync due to otg 2314 * workaround applied during clock update 2315 */ 2316 dc_trigger_sync(dc, context); 2317 } 2318 2319 if (dc->hwss.update_dsc_pg) 2320 dc->hwss.update_dsc_pg(dc, context, true); 2321 2322 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 2323 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2324 else 2325 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2326 2327 context->stream_mask = get_stream_mask(dc, context); 2328 2329 if (context->stream_mask != dc->current_state->stream_mask) 2330 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); 2331 2332 for (i = 0; i < context->stream_count; i++) 2333 context->streams[i]->mode_changed = false; 2334 2335 /* Clear update flags that were set earlier to avoid redundant programming */ 2336 for (i = 0; i < context->stream_count; i++) { 2337 context->streams[i]->update_flags.raw = 0x0; 2338 } 2339 2340 old_state = dc->current_state; 2341 dc->current_state = context; 2342 2343 dc_state_release(old_state); 2344 2345 dc_state_retain(dc->current_state); 2346 2347 return result; 2348 } 2349 2350 static bool commit_minimal_transition_state(struct dc *dc, 2351 struct dc_state *transition_base_context); 2352 2353 /** 2354 * dc_commit_streams - Commit current stream state 2355 * 2356 * @dc: DC object with the commit state to be configured in the hardware 2357 * @params: Parameters for the commit, including the streams to be committed 2358 * 2359 * Function responsible for commit streams change to the hardware. 2360 * 2361 * Return: 2362 * Return DC_OK if everything work as expected, otherwise, return a dc_status 2363 * code. 2364 */ 2365 enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params) 2366 { 2367 int i, j; 2368 struct dc_state *context; 2369 enum dc_status res = DC_OK; 2370 struct dc_validation_set set[MAX_STREAMS] = {0}; 2371 struct pipe_ctx *pipe; 2372 bool handle_exit_odm2to1 = false; 2373 2374 if (!params) 2375 return DC_ERROR_UNEXPECTED; 2376 2377 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) 2378 return res; 2379 2380 if (!streams_changed(dc, params->streams, params->stream_count) && 2381 dc->current_state->power_source == params->power_source) 2382 return res; 2383 2384 dc_exit_ips_for_hw_access(dc); 2385 2386 DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count); 2387 2388 for (i = 0; i < params->stream_count; i++) { 2389 struct dc_stream_state *stream = params->streams[i]; 2390 struct dc_stream_status *status = dc_stream_get_status(stream); 2391 struct dc_sink *sink = stream->sink; 2392 2393 /* revalidate streams */ 2394 if (!dc_is_virtual_signal(sink->sink_signal)) { 2395 res = dc_validate_stream(dc, stream); 2396 if (res != DC_OK) 2397 return res; 2398 } 2399 2400 2401 dc_stream_log(dc, stream); 2402 2403 set[i].stream = stream; 2404 2405 if (status) { 2406 set[i].plane_count = status->plane_count; 2407 for (j = 0; j < status->plane_count; j++) 2408 set[i].plane_states[j] = status->plane_states[j]; 2409 } 2410 } 2411 2412 /* ODM Combine 2:1 power optimization is only applied for single stream 2413 * scenario, it uses extra pipes than needed to reduce power consumption 2414 * We need to switch off this feature to make room for new streams. 
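 * The loop below detects this case by looking for any pipe that still has a
 * next_odm_pipe; when one is found, commit_minimal_transition_state() is used
 * to release the extra pipe before the new stream configuration is validated.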
2415 */ 2416 if (params->stream_count > dc->current_state->stream_count && 2417 dc->current_state->stream_count == 1) { 2418 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2419 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2420 if (pipe->next_odm_pipe) 2421 handle_exit_odm2to1 = true; 2422 } 2423 } 2424 2425 if (handle_exit_odm2to1) 2426 res = commit_minimal_transition_state(dc, dc->current_state); 2427 2428 context = dc_state_create_current_copy(dc); 2429 if (!context) 2430 goto context_alloc_fail; 2431 2432 context->power_source = params->power_source; 2433 2434 res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING); 2435 2436 /* 2437 * Only update link encoder to stream assignment after bandwidth validation passed. 2438 */ 2439 if (res == DC_OK && dc->res_pool->funcs->link_encs_assign && !dc->config.unify_link_enc_assignment) 2440 dc->res_pool->funcs->link_encs_assign( 2441 dc, context, context->streams, context->stream_count); 2442 2443 if (res != DC_OK) { 2444 BREAK_TO_DEBUGGER(); 2445 goto fail; 2446 } 2447 2448 /* 2449 * If not already seamless, make transition seamless by inserting intermediate minimal transition 2450 */ 2451 if (dc->hwss.is_pipe_topology_transition_seamless && 2452 !dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) { 2453 res = commit_minimal_transition_state(dc, context); 2454 if (res != DC_OK) { 2455 BREAK_TO_DEBUGGER(); 2456 goto fail; 2457 } 2458 } 2459 2460 res = dc_commit_state_no_check(dc, context); 2461 2462 for (i = 0; i < params->stream_count; i++) { 2463 for (j = 0; j < context->stream_count; j++) { 2464 if (params->streams[i]->stream_id == context->streams[j]->stream_id) 2465 params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 2466 2467 if (dc_is_embedded_signal(params->streams[i]->signal)) { 2468 struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]); 2469 2470 if (!status) 2471 continue; 2472 2473 if (dc->hwss.is_abm_supported) 2474 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]); 2475 else 2476 status->is_abm_supported = true; 2477 } 2478 } 2479 } 2480 2481 fail: 2482 dc_state_release(context); 2483 2484 context_alloc_fail: 2485 2486 DC_LOG_DC("%s Finished.\n", __func__); 2487 2488 return res; 2489 } 2490 2491 bool dc_acquire_release_mpc_3dlut( 2492 struct dc *dc, bool acquire, 2493 struct dc_stream_state *stream, 2494 struct dc_3dlut **lut, 2495 struct dc_transfer_func **shaper) 2496 { 2497 int pipe_idx; 2498 bool ret = false; 2499 bool found_pipe_idx = false; 2500 const struct resource_pool *pool = dc->res_pool; 2501 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2502 int mpcc_id = 0; 2503 2504 if (pool && res_ctx) { 2505 if (acquire) { 2506 /*find pipe idx for the given stream*/ 2507 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2508 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2509 found_pipe_idx = true; 2510 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2511 break; 2512 } 2513 } 2514 } else 2515 found_pipe_idx = true;/*for release pipe_idx is not required*/ 2516 2517 if (found_pipe_idx) { 2518 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2519 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2520 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2521 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2522 } 2523 } 2524 return ret; 2525 } 2526 
2527 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2528 { 2529 int i; 2530 struct pipe_ctx *pipe; 2531 2532 for (i = 0; i < MAX_PIPES; i++) { 2533 pipe = &context->res_ctx.pipe_ctx[i]; 2534 2535 // Don't check flip pending on phantom pipes 2536 if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)) 2537 continue; 2538 2539 /* Must set to false to start with, due to OR in update function */ 2540 pipe->plane_state->status.is_flip_pending = false; 2541 dc->hwss.update_pending_status(pipe); 2542 if (pipe->plane_state->status.is_flip_pending) 2543 return true; 2544 } 2545 return false; 2546 } 2547 2548 /* Perform updates here which need to be deferred until next vupdate 2549 * 2550 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered 2551 * but forcing lut memory to shutdown state is immediate. This causes 2552 * single frame corruption as lut gets disabled mid-frame unless shutdown 2553 * is deferred until after entering bypass. 2554 */ 2555 static void process_deferred_updates(struct dc *dc) 2556 { 2557 int i = 0; 2558 2559 if (dc->debug.enable_mem_low_power.bits.cm) { 2560 ASSERT(dc->dcn_ip->max_num_dpp); 2561 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2562 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2563 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2564 } 2565 } 2566 2567 void dc_post_update_surfaces_to_stream(struct dc *dc) 2568 { 2569 int i; 2570 struct dc_state *context = dc->current_state; 2571 2572 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2573 return; 2574 2575 post_surface_trace(dc); 2576 2577 /* 2578 * Only relevant for DCN behavior where we can guarantee the optimization 2579 * is safe to apply - retain the legacy behavior for DCE. 
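 * On DCN this means: bail out while any flip is still pending, disable any
 * pipe that no longer has a stream or plane, flush deferred DPP updates, and
 * only then lower clocks and watermarks.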
2580 */ 2581 2582 if (dc->ctx->dce_version < DCE_VERSION_MAX) 2583 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2584 else { 2585 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2586 2587 if (is_flip_pending_in_pipes(dc, context)) 2588 return; 2589 2590 for (i = 0; i < dc->res_pool->pipe_count; i++) 2591 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2592 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2593 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2594 dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]); 2595 } 2596 2597 process_deferred_updates(dc); 2598 2599 dc->hwss.optimize_bandwidth(dc, context); 2600 2601 if (dc->hwss.update_dsc_pg) 2602 dc->hwss.update_dsc_pg(dc, context, true); 2603 } 2604 2605 dc->optimized_required = false; 2606 } 2607 2608 bool dc_set_generic_gpio_for_stereo(bool enable, 2609 struct gpio_service *gpio_service) 2610 { 2611 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2612 struct gpio_pin_info pin_info; 2613 struct gpio *generic; 2614 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2615 GFP_KERNEL); 2616 2617 if (!config) 2618 return false; 2619 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2620 2621 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2622 kfree(config); 2623 return false; 2624 } else { 2625 generic = dal_gpio_service_create_generic_mux( 2626 gpio_service, 2627 pin_info.offset, 2628 pin_info.mask); 2629 } 2630 2631 if (!generic) { 2632 kfree(config); 2633 return false; 2634 } 2635 2636 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2637 2638 config->enable_output_from_mux = enable; 2639 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2640 2641 if (gpio_result == GPIO_RESULT_OK) 2642 gpio_result = dal_mux_setup_config(generic, config); 2643 2644 if (gpio_result == GPIO_RESULT_OK) { 2645 dal_gpio_close(generic); 2646 dal_gpio_destroy_generic_mux(&generic); 2647 kfree(config); 2648 return true; 2649 } else { 2650 dal_gpio_close(generic); 2651 dal_gpio_destroy_generic_mux(&generic); 2652 kfree(config); 2653 return false; 2654 } 2655 } 2656 2657 static bool is_surface_in_context( 2658 const struct dc_state *context, 2659 const struct dc_plane_state *plane_state) 2660 { 2661 int j; 2662 2663 for (j = 0; j < MAX_PIPES; j++) { 2664 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2665 2666 if (plane_state == pipe_ctx->plane_state) { 2667 return true; 2668 } 2669 } 2670 2671 return false; 2672 } 2673 2674 static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u) 2675 { 2676 union surface_update_flags *update_flags = &u->surface->update_flags; 2677 struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; 2678 2679 if (!u->plane_info) 2680 return update_type; 2681 2682 // `plane_info` present means at least `STREAM` lock is required 2683 elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2684 2685 if (u->plane_info->color_space != u->surface->color_space) { 2686 update_flags->bits.color_space_change = 1; 2687 elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2688 } 2689 2690 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { 2691 update_flags->bits.horizontal_mirror_change = 1; 2692 elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2693 } 2694 2695 if (u->plane_info->rotation != u->surface->rotation) { 2696 
update_flags->bits.rotation_change = 1; 2697 elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2698 } 2699 2700 if (u->plane_info->format != u->surface->format) { 2701 update_flags->bits.pixel_format_change = 1; 2702 elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2703 } 2704 2705 if (u->plane_info->stereo_format != u->surface->stereo_format) { 2706 update_flags->bits.stereo_format_change = 1; 2707 elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2708 } 2709 2710 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { 2711 update_flags->bits.per_pixel_alpha_change = 1; 2712 elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2713 } 2714 2715 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { 2716 update_flags->bits.global_alpha_change = 1; 2717 elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2718 } 2719 2720 if (u->plane_info->dcc.enable != u->surface->dcc.enable 2721 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk 2722 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { 2723 /* During DCC on/off, stutter period is calculated before 2724 * DCC has fully transitioned. This results in incorrect 2725 * stutter period calculation. Triggering a full update will 2726 * recalculate stutter period. 2727 */ 2728 update_flags->bits.dcc_change = 1; 2729 elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2730 } 2731 2732 if (resource_pixel_format_to_bpp(u->plane_info->format) != 2733 resource_pixel_format_to_bpp(u->surface->format)) { 2734 /* different bytes per element will require full bandwidth 2735 * and DML calculation 2736 */ 2737 update_flags->bits.bpp_change = 1; 2738 elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2739 } 2740 2741 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch 2742 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { 2743 update_flags->bits.plane_size_change = 1; 2744 elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2745 } 2746 2747 const struct dc_tiling_info *tiling = &u->plane_info->tiling_info; 2748 2749 if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) { 2750 update_flags->bits.swizzle_change = 1; 2751 elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2752 2753 switch (tiling->gfxversion) { 2754 case DcGfxVersion9: 2755 case DcGfxVersion10: 2756 case DcGfxVersion11: 2757 if (tiling->gfx9.swizzle != DC_SW_LINEAR) { 2758 update_flags->bits.bandwidth_change = 1; 2759 elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2760 } 2761 break; 2762 case DcGfxAddr3: 2763 if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) { 2764 update_flags->bits.bandwidth_change = 1; 2765 elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2766 } 2767 break; 2768 case DcGfxVersion7: 2769 case DcGfxVersion8: 2770 case DcGfxVersionUnknown: 2771 default: 2772 break; 2773 } 2774 } 2775 2776 /* This should be UPDATE_TYPE_FAST if nothing has changed. 
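 * (The STREAM lock is still requested above simply because plane_info was
 * provided.)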
*/ 2777 return update_type; 2778 } 2779 2780 static struct surface_update_descriptor get_scaling_info_update_type( 2781 const struct dc_check_config *check_config, 2782 const struct dc_surface_update *u) 2783 { 2784 union surface_update_flags *update_flags = &u->surface->update_flags; 2785 struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; 2786 2787 if (!u->scaling_info) 2788 return update_type; 2789 2790 // `scaling_info` present means at least `STREAM` lock is required 2791 elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2792 2793 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2794 || u->scaling_info->src_rect.height != u->surface->src_rect.height 2795 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2796 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2797 || u->scaling_info->clip_rect.width != u->surface->clip_rect.width 2798 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height 2799 || u->scaling_info->scaling_quality.integer_scaling != 2800 u->surface->scaling_quality.integer_scaling) { 2801 update_flags->bits.scaling_change = 1; 2802 elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2803 2804 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2805 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2806 /* Making src rect bigger requires a bandwidth change */ 2807 update_flags->bits.clock_change = 1; 2808 2809 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2810 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2811 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2812 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2813 /* Making dst rect smaller requires a bandwidth change */ 2814 update_flags->bits.bandwidth_change = 1; 2815 2816 if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width && 2817 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || 2818 u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) 2819 /* Changing clip size of a large surface may result in MPC slice count change */ 2820 update_flags->bits.bandwidth_change = 1; 2821 } 2822 2823 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2824 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2825 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x 2826 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2827 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2828 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) { 2829 elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2830 update_flags->bits.position_change = 1; 2831 } 2832 2833 return update_type; 2834 } 2835 2836 static struct surface_update_descriptor det_surface_update( 2837 const struct dc_check_config *check_config, 2838 struct dc_surface_update *u) 2839 { 2840 struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; 2841 union surface_update_flags *update_flags = &u->surface->update_flags; 2842 2843 if (u->surface->force_full_update) { 2844 update_flags->raw = 0xFFFFFFFF; 2845 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2846 return overall_type; 2847 } 2848 2849 update_flags->raw = 0; // Reset all flags 2850 2851 struct surface_update_descriptor inner_type = get_plane_info_update_type(u); 2852 2853 
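/* Fold the plane_info verdict into the overall result: keep the strictest
 * update type seen so far and accumulate the required locks.
 */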
elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); 2854 2855 inner_type = get_scaling_info_update_type(check_config, u); 2856 elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); 2857 2858 if (u->flip_addr) { 2859 update_flags->bits.addr_update = 1; 2860 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2861 2862 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2863 update_flags->bits.tmz_changed = 1; 2864 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2865 } 2866 } 2867 if (u->in_transfer_func) { 2868 update_flags->bits.in_transfer_func_change = 1; 2869 elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2870 } 2871 2872 if (u->input_csc_color_matrix) { 2873 update_flags->bits.input_csc_change = 1; 2874 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2875 } 2876 2877 if (u->coeff_reduction_factor) { 2878 update_flags->bits.coeff_reduction_change = 1; 2879 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2880 } 2881 2882 if (u->gamut_remap_matrix) { 2883 update_flags->bits.gamut_remap_change = 1; 2884 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2885 } 2886 2887 if (u->blend_tf || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) { 2888 update_flags->bits.gamma_change = 1; 2889 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2890 } 2891 2892 if (u->lut3d_func || u->func_shaper) { 2893 update_flags->bits.lut_3d = 1; 2894 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2895 } 2896 2897 if (u->hdr_mult.value) 2898 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2899 // TODO: Should be fast? 2900 update_flags->bits.hdr_mult = 1; 2901 elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2902 } 2903 2904 if (u->sdr_white_level_nits) 2905 if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) { 2906 // TODO: Should be fast? 
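// Unlike hdr_mult above, a change here currently forces a full update
// under the global lock.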
2907 update_flags->bits.sdr_white_level_nits = 1; 2908 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2909 } 2910 2911 if (u->cm2_params) { 2912 if (u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting 2913 || u->cm2_params->component_settings.lut1d_enable != u->surface->mcm_lut1d_enable 2914 || u->cm2_params->cm2_luts.lut3d_data.lut3d_src != u->surface->mcm_luts.lut3d_data.lut3d_src) { 2915 update_flags->bits.mcm_transfer_function_enable_change = 1; 2916 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2917 } 2918 } 2919 2920 if (update_flags->bits.lut_3d && 2921 u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { 2922 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2923 } 2924 2925 if (check_config->enable_legacy_fast_update && 2926 (update_flags->bits.gamma_change || 2927 update_flags->bits.gamut_remap_change || 2928 update_flags->bits.input_csc_change || 2929 update_flags->bits.coeff_reduction_change)) { 2930 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2931 } 2932 return overall_type; 2933 } 2934 2935 /* May need to flip the desktop plane in cases where MPO plane receives a flip but desktop plane doesn't 2936 * while both planes are flip_immediate 2937 */ 2938 static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count) 2939 { 2940 bool has_flip_immediate_plane = false; 2941 int i; 2942 2943 for (i = 0; i < surface_count; i++) { 2944 if (updates[i].surface->flip_immediate) { 2945 has_flip_immediate_plane = true; 2946 break; 2947 } 2948 } 2949 2950 if (has_flip_immediate_plane && surface_count > 1) { 2951 for (i = 0; i < surface_count; i++) { 2952 if (updates[i].surface->flip_immediate) 2953 updates[i].surface->update_flags.bits.addr_update = 1; 2954 } 2955 } 2956 } 2957 2958 static struct surface_update_descriptor check_update_surfaces_for_stream( 2959 const struct dc_check_config *check_config, 2960 struct dc_surface_update *updates, 2961 int surface_count, 2962 struct dc_stream_update *stream_update) 2963 { 2964 struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; 2965 2966 /* When countdown finishes, promote this flip to full to trigger deferred final transition */ 2967 if (check_config->deferred_transition_state && !check_config->transition_countdown_to_steady_state) { 2968 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2969 } 2970 2971 if (stream_update && stream_update->pending_test_pattern) { 2972 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2973 } 2974 2975 if (stream_update && stream_update->hw_cursor_req) { 2976 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2977 } 2978 2979 /* some stream updates require passive update */ 2980 if (stream_update) { 2981 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2982 2983 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2984 2985 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2986 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2987 stream_update->integer_scaling_update) 2988 su_flags->bits.scaling = 1; 2989 2990 if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func) 2991 su_flags->bits.out_tf = 1; 2992 2993 if 
(stream_update->abm_level) 2994 su_flags->bits.abm_level = 1; 2995 2996 if (stream_update->dpms_off) { 2997 su_flags->bits.dpms_off = 1; 2998 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL | LOCK_DESCRIPTOR_LINK); 2999 } 3000 3001 if (stream_update->gamut_remap) 3002 su_flags->bits.gamut_remap = 1; 3003 3004 if (stream_update->wb_update) 3005 su_flags->bits.wb_update = 1; 3006 3007 if (stream_update->dsc_config) 3008 su_flags->bits.dsc_changed = 1; 3009 3010 if (stream_update->mst_bw_update) 3011 su_flags->bits.mst_bw = 1; 3012 3013 if (stream_update->stream->freesync_on_desktop && 3014 (stream_update->vrr_infopacket || stream_update->allow_freesync || 3015 stream_update->vrr_active_variable || stream_update->vrr_active_fixed)) 3016 su_flags->bits.fams_changed = 1; 3017 3018 if (stream_update->scaler_sharpener_update) 3019 su_flags->bits.scaler_sharpener = 1; 3020 3021 if (stream_update->sharpening_required) 3022 su_flags->bits.sharpening_required = 1; 3023 3024 if (stream_update->output_color_space) 3025 su_flags->bits.out_csc = 1; 3026 3027 // TODO: Make each elevation explicit, as to not override fast stream in crct_timing_adjust 3028 if (su_flags->raw) 3029 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 3030 3031 // Non-global cases 3032 if (stream_update->output_csc_transform) { 3033 su_flags->bits.out_csc = 1; 3034 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 3035 } 3036 3037 if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func) { 3038 su_flags->bits.out_tf = 1; 3039 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 3040 } 3041 } 3042 3043 for (int i = 0 ; i < surface_count; i++) { 3044 struct surface_update_descriptor inner_type = 3045 det_surface_update(check_config, &updates[i]); 3046 3047 elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); 3048 } 3049 3050 return overall_type; 3051 } 3052 3053 /* 3054 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 3055 * 3056 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 3057 */ 3058 struct surface_update_descriptor dc_check_update_surfaces_for_stream( 3059 const struct dc_check_config *check_config, 3060 struct dc_surface_update *updates, 3061 int surface_count, 3062 struct dc_stream_update *stream_update) 3063 { 3064 if (stream_update) 3065 stream_update->stream->update_flags.raw = 0; 3066 for (size_t i = 0; i < surface_count; i++) 3067 updates[i].surface->update_flags.raw = 0; 3068 3069 return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update); 3070 } 3071 3072 static struct dc_stream_status *stream_get_status( 3073 struct dc_state *ctx, 3074 struct dc_stream_state *stream) 3075 { 3076 uint8_t i; 3077 3078 for (i = 0; i < ctx->stream_count; i++) { 3079 if (stream == ctx->streams[i]) { 3080 return &ctx->stream_status[i]; 3081 } 3082 } 3083 3084 return NULL; 3085 } 3086 3087 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 3088 3089 static void copy_surface_update_to_plane( 3090 struct dc_plane_state *surface, 3091 struct dc_surface_update *srf_update) 3092 { 3093 if (srf_update->flip_addr) { 3094 surface->address = srf_update->flip_addr->address; 3095 surface->flip_immediate = 3096 srf_update->flip_addr->flip_immediate; 3097 surface->time.time_elapsed_in_us[surface->time.index] = 3098 
srf_update->flip_addr->flip_timestamp_in_us - 3099 surface->time.prev_update_time_in_us; 3100 surface->time.prev_update_time_in_us = 3101 srf_update->flip_addr->flip_timestamp_in_us; 3102 surface->time.index++; 3103 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 3104 surface->time.index = 0; 3105 3106 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 3107 } 3108 3109 if (srf_update->scaling_info) { 3110 surface->scaling_quality = 3111 srf_update->scaling_info->scaling_quality; 3112 surface->dst_rect = 3113 srf_update->scaling_info->dst_rect; 3114 surface->src_rect = 3115 srf_update->scaling_info->src_rect; 3116 surface->clip_rect = 3117 srf_update->scaling_info->clip_rect; 3118 } 3119 3120 if (srf_update->plane_info) { 3121 surface->color_space = 3122 srf_update->plane_info->color_space; 3123 surface->format = 3124 srf_update->plane_info->format; 3125 surface->plane_size = 3126 srf_update->plane_info->plane_size; 3127 surface->rotation = 3128 srf_update->plane_info->rotation; 3129 surface->horizontal_mirror = 3130 srf_update->plane_info->horizontal_mirror; 3131 surface->stereo_format = 3132 srf_update->plane_info->stereo_format; 3133 surface->tiling_info = 3134 srf_update->plane_info->tiling_info; 3135 surface->visible = 3136 srf_update->plane_info->visible; 3137 surface->per_pixel_alpha = 3138 srf_update->plane_info->per_pixel_alpha; 3139 surface->global_alpha = 3140 srf_update->plane_info->global_alpha; 3141 surface->global_alpha_value = 3142 srf_update->plane_info->global_alpha_value; 3143 surface->dcc = 3144 srf_update->plane_info->dcc; 3145 surface->layer_index = 3146 srf_update->plane_info->layer_index; 3147 } 3148 3149 if (srf_update->gamma) { 3150 memcpy(&surface->gamma_correction.entries, 3151 &srf_update->gamma->entries, 3152 sizeof(struct dc_gamma_entries)); 3153 surface->gamma_correction.is_identity = 3154 srf_update->gamma->is_identity; 3155 surface->gamma_correction.num_entries = 3156 srf_update->gamma->num_entries; 3157 surface->gamma_correction.type = 3158 srf_update->gamma->type; 3159 } 3160 3161 if (srf_update->in_transfer_func) { 3162 surface->in_transfer_func.sdr_ref_white_level = 3163 srf_update->in_transfer_func->sdr_ref_white_level; 3164 surface->in_transfer_func.tf = 3165 srf_update->in_transfer_func->tf; 3166 surface->in_transfer_func.type = 3167 srf_update->in_transfer_func->type; 3168 memcpy(&surface->in_transfer_func.tf_pts, 3169 &srf_update->in_transfer_func->tf_pts, 3170 sizeof(struct dc_transfer_func_distributed_points)); 3171 } 3172 3173 if (srf_update->cm2_params) { 3174 surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; 3175 surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; 3176 surface->mcm_luts = srf_update->cm2_params->cm2_luts; 3177 } 3178 3179 if (srf_update->func_shaper) { 3180 memcpy(&surface->in_shaper_func, srf_update->func_shaper, 3181 sizeof(surface->in_shaper_func)); 3182 3183 if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER) 3184 surface->mcm_luts.shaper = &surface->in_shaper_func; 3185 } 3186 3187 if (srf_update->lut3d_func) 3188 memcpy(&surface->lut3d_func, srf_update->lut3d_func, 3189 sizeof(surface->lut3d_func)); 3190 3191 if (srf_update->hdr_mult.value) 3192 surface->hdr_mult = 3193 srf_update->hdr_mult; 3194 3195 if (srf_update->sdr_white_level_nits) 3196 surface->sdr_white_level_nits = 3197 srf_update->sdr_white_level_nits; 3198 3199 if (srf_update->blend_tf) { 3200 
memcpy(&surface->blend_tf, srf_update->blend_tf, 3201 sizeof(surface->blend_tf)); 3202 3203 if (surface->mcm_lut1d_enable) 3204 surface->mcm_luts.lut1d_func = &surface->blend_tf; 3205 } 3206 3207 if (srf_update->cm2_params || srf_update->blend_tf) 3208 surface->lut_bank_a = !surface->lut_bank_a; 3209 3210 if (srf_update->input_csc_color_matrix) 3211 surface->input_csc_color_matrix = 3212 *srf_update->input_csc_color_matrix; 3213 3214 if (srf_update->coeff_reduction_factor) 3215 surface->coeff_reduction_factor = 3216 *srf_update->coeff_reduction_factor; 3217 3218 if (srf_update->gamut_remap_matrix) 3219 surface->gamut_remap_matrix = 3220 *srf_update->gamut_remap_matrix; 3221 3222 if (srf_update->cursor_csc_color_matrix) 3223 surface->cursor_csc_color_matrix = 3224 *srf_update->cursor_csc_color_matrix; 3225 3226 if (srf_update->bias_and_scale.bias_and_scale_valid) 3227 surface->bias_and_scale = 3228 srf_update->bias_and_scale; 3229 } 3230 3231 static void copy_stream_update_to_stream(struct dc *dc, 3232 struct dc_state *context, 3233 struct dc_stream_state *stream, 3234 struct dc_stream_update *update) 3235 { 3236 struct dc_context *dc_ctx = dc->ctx; 3237 3238 if (update == NULL || stream == NULL) 3239 return; 3240 3241 if (update->src.height && update->src.width) 3242 stream->src = update->src; 3243 3244 if (update->dst.height && update->dst.width) 3245 stream->dst = update->dst; 3246 3247 if (update->out_transfer_func) { 3248 stream->out_transfer_func.sdr_ref_white_level = 3249 update->out_transfer_func->sdr_ref_white_level; 3250 stream->out_transfer_func.tf = update->out_transfer_func->tf; 3251 stream->out_transfer_func.type = 3252 update->out_transfer_func->type; 3253 memcpy(&stream->out_transfer_func.tf_pts, 3254 &update->out_transfer_func->tf_pts, 3255 sizeof(struct dc_transfer_func_distributed_points)); 3256 } 3257 3258 if (update->hdr_static_metadata) 3259 stream->hdr_static_metadata = *update->hdr_static_metadata; 3260 3261 if (update->abm_level) 3262 stream->abm_level = *update->abm_level; 3263 3264 if (update->periodic_interrupt) 3265 stream->periodic_interrupt = *update->periodic_interrupt; 3266 3267 if (update->gamut_remap) 3268 stream->gamut_remap_matrix = *update->gamut_remap; 3269 3270 /* Note: this being updated after mode set is currently not a use case 3271 * however if it arises OCSC would need to be reprogrammed at the 3272 * minimum 3273 */ 3274 if (update->output_color_space) 3275 stream->output_color_space = *update->output_color_space; 3276 3277 if (update->output_csc_transform) 3278 stream->csc_color_matrix = *update->output_csc_transform; 3279 3280 if (update->vrr_infopacket) 3281 stream->vrr_infopacket = *update->vrr_infopacket; 3282 3283 if (update->hw_cursor_req) 3284 stream->hw_cursor_req = *update->hw_cursor_req; 3285 3286 if (update->allow_freesync) 3287 stream->allow_freesync = *update->allow_freesync; 3288 3289 if (update->vrr_active_variable) 3290 stream->vrr_active_variable = *update->vrr_active_variable; 3291 3292 if (update->vrr_active_fixed) 3293 stream->vrr_active_fixed = *update->vrr_active_fixed; 3294 3295 if (update->crtc_timing_adjust) { 3296 if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min || 3297 stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max || 3298 stream->adjust.timing_adjust_pending) 3299 update->crtc_timing_adjust->timing_adjust_pending = true; 3300 stream->adjust = *update->crtc_timing_adjust; 3301 update->crtc_timing_adjust->timing_adjust_pending = false; 3302 } 3303 3304 if 
(update->dpms_off) 3305 stream->dpms_off = *update->dpms_off; 3306 3307 if (update->hfvsif_infopacket) 3308 stream->hfvsif_infopacket = *update->hfvsif_infopacket; 3309 3310 if (update->vtem_infopacket) 3311 stream->vtem_infopacket = *update->vtem_infopacket; 3312 3313 if (update->vsc_infopacket) 3314 stream->vsc_infopacket = *update->vsc_infopacket; 3315 3316 if (update->vsp_infopacket) 3317 stream->vsp_infopacket = *update->vsp_infopacket; 3318 3319 if (update->adaptive_sync_infopacket) 3320 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; 3321 3322 if (update->avi_infopacket) 3323 stream->avi_infopacket = *update->avi_infopacket; 3324 3325 if (update->dither_option) 3326 stream->dither_option = *update->dither_option; 3327 3328 if (update->pending_test_pattern) 3329 stream->test_pattern = *update->pending_test_pattern; 3330 /* update current stream with writeback info */ 3331 if (update->wb_update) { 3332 int i; 3333 3334 stream->num_wb_info = update->wb_update->num_wb_info; 3335 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); 3336 for (i = 0; i < stream->num_wb_info; i++) 3337 stream->writeback_info[i] = 3338 update->wb_update->writeback_info[i]; 3339 } 3340 if (update->dsc_config) { 3341 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; 3342 uint32_t old_dsc_enabled = stream->timing.flags.DSC; 3343 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 3344 update->dsc_config->num_slices_v != 0); 3345 3346 /* Use temporarry context for validating new DSC config */ 3347 struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state); 3348 3349 if (dsc_validate_context) { 3350 stream->timing.dsc_cfg = *update->dsc_config; 3351 stream->timing.flags.DSC = enable_dsc; 3352 if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, 3353 DC_VALIDATE_MODE_ONLY) != DC_OK) { 3354 stream->timing.dsc_cfg = old_dsc_cfg; 3355 stream->timing.flags.DSC = old_dsc_enabled; 3356 update->dsc_config = NULL; 3357 } 3358 3359 dc_state_release(dsc_validate_context); 3360 } else { 3361 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 3362 update->dsc_config = NULL; 3363 } 3364 } 3365 if (update->scaler_sharpener_update) 3366 stream->scaler_sharpener_update = *update->scaler_sharpener_update; 3367 if (update->sharpening_required) 3368 stream->sharpening_required = *update->sharpening_required; 3369 } 3370 3371 static void backup_planes_and_stream_state( 3372 struct dc_scratch_space *scratch, 3373 struct dc_stream_state *stream) 3374 { 3375 int i; 3376 struct dc_stream_status *status = dc_stream_get_status(stream); 3377 3378 if (!status) 3379 return; 3380 3381 for (i = 0; i < status->plane_count; i++) { 3382 dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]); 3383 } 3384 scratch->stream_state = *stream; 3385 } 3386 3387 static void restore_planes_and_stream_state( 3388 struct dc_scratch_space *scratch, 3389 struct dc_stream_state *stream) 3390 { 3391 int i; 3392 struct dc_stream_status *status = dc_stream_get_status(stream); 3393 3394 if (!status) 3395 return; 3396 3397 for (i = 0; i < status->plane_count; i++) { 3398 dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]); 3399 } 3400 3401 // refcount is persistent 3402 struct kref temp_refcount = stream->refcount; 3403 *stream = scratch->stream_state; 3404 stream->refcount = temp_refcount; 3405 } 3406 3407 /** 3408 * update_seamless_boot_flags() - Helper function for updating seamless boot flags 3409 * 3410 * @dc: Current DC state 3411 * 
@context: New DC state to be programmed 3412 * @surface_count: Number of surfaces that have an updated 3413 * @stream: Corresponding stream to be updated in the current flip 3414 * 3415 * Updating seamless boot flags do not need to be part of the commit sequence. This 3416 * helper function will update the seamless boot flags on each flip (if required) 3417 * outside of the HW commit sequence (fast or slow). 3418 * 3419 * Return: void 3420 */ 3421 static void update_seamless_boot_flags(struct dc *dc, 3422 struct dc_state *context, 3423 int surface_count, 3424 struct dc_stream_state *stream) 3425 { 3426 if (get_seamless_boot_stream_count(context) > 0 && (surface_count > 0 || stream->dpms_off)) { 3427 /* Optimize seamless boot flag keeps clocks and watermarks high until 3428 * first flip. After first flip, optimization is required to lower 3429 * bandwidth. Important to note that it is expected UEFI will 3430 * only light up a single display on POST, therefore we only expect 3431 * one stream with seamless boot flag set. 3432 */ 3433 if (stream->apply_seamless_boot_optimization) { 3434 stream->apply_seamless_boot_optimization = false; 3435 3436 if (get_seamless_boot_stream_count(context) == 0) 3437 dc->optimized_required = true; 3438 } 3439 } 3440 } 3441 3442 static bool full_update_required_weak( 3443 const struct dc *dc, 3444 const struct dc_surface_update *srf_updates, 3445 int surface_count, 3446 const struct dc_stream_update *stream_update, 3447 const struct dc_stream_state *stream); 3448 3449 struct pipe_split_policy_backup { 3450 bool dynamic_odm_policy; 3451 bool subvp_policy; 3452 enum pipe_split_policy mpc_policy; 3453 char force_odm[MAX_PIPES]; 3454 }; 3455 3456 static void backup_and_set_minimal_pipe_split_policy(struct dc *dc, 3457 struct dc_state *context, 3458 struct pipe_split_policy_backup *policy) 3459 { 3460 int i; 3461 3462 if (!dc->config.is_vmin_only_asic) { 3463 policy->mpc_policy = dc->debug.pipe_split_policy; 3464 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 3465 } 3466 policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; 3467 dc->debug.enable_single_display_2to1_odm_policy = false; 3468 policy->subvp_policy = dc->debug.force_disable_subvp; 3469 dc->debug.force_disable_subvp = true; 3470 for (i = 0; i < context->stream_count; i++) { 3471 policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments; 3472 if (context->streams[i]->debug.allow_transition_for_forced_odm) 3473 context->streams[i]->debug.force_odm_combine_segments = 0; 3474 } 3475 } 3476 3477 static void restore_minimal_pipe_split_policy(struct dc *dc, 3478 struct dc_state *context, 3479 struct pipe_split_policy_backup *policy) 3480 { 3481 uint8_t i; 3482 3483 if (!dc->config.is_vmin_only_asic) 3484 dc->debug.pipe_split_policy = policy->mpc_policy; 3485 dc->debug.enable_single_display_2to1_odm_policy = 3486 policy->dynamic_odm_policy; 3487 dc->debug.force_disable_subvp = policy->subvp_policy; 3488 for (i = 0; i < context->stream_count; i++) 3489 context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i]; 3490 } 3491 3492 /** 3493 * update_planes_and_stream_state() - The function takes planes and stream 3494 * updates as inputs and determines the appropriate update type. If update type 3495 * is FULL, the function allocates a new context, populates and validates it. 3496 * Otherwise, it updates current dc context. The function will return both 3497 * new_context and new_update_type back to the caller. 
The function also backs
3498 * up both current and new contexts into corresponding dc state scratch memory.
3499 * TODO: The function does too many things, and even conditionally allocates dc
3500 * context memory implicitly. We should consider breaking it down.
3501 *
3502 * @dc: Current DC state
3503 * @srf_updates: an array of surface updates
3504 * @surface_count: surface update count
3505 * @stream: Corresponding stream to be updated
3506 * @stream_update: stream update
3507 * @new_update_type: [out] determined update type by the function
3508 * @new_context: [out] new context allocated and validated if update type is
3509 * FULL, reference to current context if update type is less than FULL.
3510 *
3511 * Return: true if a valid update is populated into new_context, false
3512 * otherwise.
3513 */
3514 static bool update_planes_and_stream_state(struct dc *dc,
3515 struct dc_surface_update *srf_updates, int surface_count,
3516 struct dc_stream_state *stream,
3517 struct dc_stream_update *stream_update,
3518 enum surface_update_type *new_update_type,
3519 struct dc_state **new_context)
3520 {
3521 struct dc_state *context;
3522 int i, j;
3523 enum surface_update_type update_type;
3524 const struct dc_stream_status *stream_status;
3525 struct dc_context *dc_ctx = dc->ctx;
3526
3527 stream_status = dc_stream_get_status(stream);
3528
3529 if (!stream_status) {
3530 if (surface_count) /* Only an error condition if surface_count is non-zero */
3531 ASSERT(false);
3532
3533 return false; /* Cannot commit surface to stream that is not committed */
3534 }
3535
3536 context = dc->current_state;
3537 update_type = dc_check_update_surfaces_for_stream(
3538 &dc->check_config, srf_updates, surface_count, stream_update).update_type;
3539 if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
3540 update_type = UPDATE_TYPE_FULL;
3541
3542 /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
3543 * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip.
3544 * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
3545 */ 3546 force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); 3547 if (update_type == UPDATE_TYPE_FULL) 3548 backup_planes_and_stream_state(&dc->scratch.current_state, stream); 3549 3550 /* update current stream with the new updates */ 3551 copy_stream_update_to_stream(dc, context, stream, stream_update); 3552 3553 /* do not perform surface update if surface has invalid dimensions 3554 * (all zero) and no scaling_info is provided 3555 */ 3556 if (surface_count > 0) { 3557 for (i = 0; i < surface_count; i++) { 3558 if ((srf_updates[i].surface->src_rect.width == 0 || 3559 srf_updates[i].surface->src_rect.height == 0 || 3560 srf_updates[i].surface->dst_rect.width == 0 || 3561 srf_updates[i].surface->dst_rect.height == 0) && 3562 (!srf_updates[i].scaling_info || 3563 srf_updates[i].scaling_info->src_rect.width == 0 || 3564 srf_updates[i].scaling_info->src_rect.height == 0 || 3565 srf_updates[i].scaling_info->dst_rect.width == 0 || 3566 srf_updates[i].scaling_info->dst_rect.height == 0)) { 3567 DC_ERROR("Invalid src/dst rects in surface update!\n"); 3568 return false; 3569 } 3570 } 3571 } 3572 3573 if (update_type == UPDATE_TYPE_FULL) { 3574 if (stream_update) { 3575 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 3576 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 3577 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 3578 } 3579 for (i = 0; i < surface_count; i++) 3580 srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF; 3581 } 3582 3583 if (update_type >= update_surface_trace_level) 3584 update_surface_trace(dc, srf_updates, surface_count); 3585 3586 for (i = 0; i < surface_count; i++) 3587 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]); 3588 3589 if (update_type >= UPDATE_TYPE_FULL) { 3590 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3591 3592 for (i = 0; i < surface_count; i++) 3593 new_planes[i] = srf_updates[i].surface; 3594 3595 /* initialize scratch memory for building context */ 3596 context = dc_state_create_copy(dc->current_state); 3597 if (context == NULL) { 3598 DC_ERROR("Failed to allocate new validate context!\n"); 3599 return false; 3600 } 3601 3602 /* For each full update, remove all existing phantom pipes first. 
3603 * Ensures that we have enough pipes for newly added MPO planes 3604 */ 3605 dc_state_remove_phantom_streams_and_planes(dc, context); 3606 dc_state_release_phantom_streams_and_planes(dc, context); 3607 3608 /*remove old surfaces from context */ 3609 if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) { 3610 3611 BREAK_TO_DEBUGGER(); 3612 goto fail; 3613 } 3614 3615 /* add surface to context */ 3616 if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3617 3618 BREAK_TO_DEBUGGER(); 3619 goto fail; 3620 } 3621 } 3622 3623 /* save update parameters into surface */ 3624 for (i = 0; i < surface_count; i++) { 3625 struct dc_plane_state *surface = srf_updates[i].surface; 3626 3627 if (update_type != UPDATE_TYPE_MED) 3628 continue; 3629 if (surface->update_flags.bits.position_change) { 3630 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3631 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3632 3633 if (pipe_ctx->plane_state != surface) 3634 continue; 3635 3636 resource_build_scaling_params(pipe_ctx); 3637 } 3638 } 3639 } 3640 3641 if (update_type == UPDATE_TYPE_FULL) { 3642 struct pipe_split_policy_backup policy; 3643 bool minimize = false; 3644 3645 if (dc->check_config.deferred_transition_state) { 3646 if (dc->check_config.transition_countdown_to_steady_state) { 3647 /* During countdown, all new contexts created as minimal transition states */ 3648 minimize = true; 3649 } else { 3650 dc->check_config.deferred_transition_state = false; 3651 } 3652 } 3653 3654 if (minimize) 3655 backup_and_set_minimal_pipe_split_policy(dc, context, &policy); 3656 3657 if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) { 3658 if (minimize) 3659 restore_minimal_pipe_split_policy(dc, context, &policy); 3660 BREAK_TO_DEBUGGER(); 3661 goto fail; 3662 } 3663 3664 if (minimize) 3665 restore_minimal_pipe_split_policy(dc, context, &policy); 3666 } 3667 update_seamless_boot_flags(dc, context, surface_count, stream); 3668 3669 *new_context = context; 3670 *new_update_type = update_type; 3671 if (update_type == UPDATE_TYPE_FULL) 3672 backup_planes_and_stream_state(&dc->scratch.new_state, stream); 3673 3674 return true; 3675 3676 fail: 3677 dc_state_release(context); 3678 3679 return false; 3680 3681 } 3682 3683 static void commit_planes_do_stream_update(struct dc *dc, 3684 struct dc_stream_state *stream, 3685 struct dc_stream_update *stream_update, 3686 enum surface_update_type update_type, 3687 struct dc_state *context) 3688 { 3689 int j; 3690 3691 // Stream updates 3692 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3693 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3694 3695 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) { 3696 3697 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3698 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3699 3700 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3701 stream_update->vrr_infopacket || 3702 stream_update->vsc_infopacket || 3703 stream_update->vsp_infopacket || 3704 stream_update->hfvsif_infopacket || 3705 stream_update->adaptive_sync_infopacket || 3706 stream_update->vtem_infopacket || 3707 stream_update->avi_infopacket) { 3708 resource_build_info_frame(pipe_ctx); 3709 dc->hwss.update_info_frame(pipe_ctx); 3710 3711 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3712 dc->link_srv->dp_trace_source_sequence( 3713 pipe_ctx->stream->link, 3714 
DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3715 } 3716 3717 if (stream_update->hdr_static_metadata && 3718 stream->use_dynamic_meta && 3719 dc->hwss.set_dmdata_attributes && 3720 pipe_ctx->stream->dmdata_address.quad_part != 0) 3721 dc->hwss.set_dmdata_attributes(pipe_ctx); 3722 3723 if (stream_update->gamut_remap) 3724 dc_stream_set_gamut_remap(dc, stream); 3725 3726 if (stream_update->output_csc_transform) 3727 dc_stream_program_csc_matrix(dc, stream); 3728 3729 if (stream_update->dither_option) { 3730 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3731 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3732 &pipe_ctx->stream->bit_depth_params); 3733 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3734 &stream->bit_depth_params, 3735 &stream->clamping); 3736 while (odm_pipe) { 3737 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3738 &stream->bit_depth_params, 3739 &stream->clamping); 3740 odm_pipe = odm_pipe->next_odm_pipe; 3741 } 3742 } 3743 3744 if (stream_update->cursor_attributes) 3745 program_cursor_attributes(dc, stream); 3746 3747 if (stream_update->cursor_position) 3748 program_cursor_position(dc, stream); 3749 3750 /* Full fe update*/ 3751 if (update_type == UPDATE_TYPE_FAST) 3752 continue; 3753 3754 if (stream_update->dsc_config) 3755 dc->link_srv->update_dsc_config(pipe_ctx); 3756 3757 if (stream_update->mst_bw_update) { 3758 if (stream_update->mst_bw_update->is_increase) 3759 dc->link_srv->increase_mst_payload(pipe_ctx, 3760 stream_update->mst_bw_update->mst_stream_bw); 3761 else 3762 dc->link_srv->reduce_mst_payload(pipe_ctx, 3763 stream_update->mst_bw_update->mst_stream_bw); 3764 } 3765 3766 if (stream_update->pending_test_pattern) { 3767 /* 3768 * test pattern params depends on ODM topology 3769 * changes that we could be applying to front 3770 * end. Since at the current stage front end 3771 * changes are not yet applied. We can only 3772 * apply test pattern in hw based on current 3773 * state and populate the final test pattern 3774 * params in new state. If current and new test 3775 * pattern params are different as result of 3776 * different ODM topology being used, it will be 3777 * detected and handle during front end 3778 * programming update. 
3779 */ 3780 dc->link_srv->dp_set_test_pattern(stream->link, 3781 stream->test_pattern.type, 3782 stream->test_pattern.color_space, 3783 stream->test_pattern.p_link_settings, 3784 stream->test_pattern.p_custom_pattern, 3785 stream->test_pattern.cust_pattern_size); 3786 resource_build_test_pattern_params(&context->res_ctx, pipe_ctx); 3787 } 3788 3789 if (stream_update->dpms_off) { 3790 if (*stream_update->dpms_off) { 3791 dc->link_srv->set_dpms_off(pipe_ctx); 3792 /* for dpms, keep acquired resources*/ 3793 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3794 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3795 3796 dc->optimized_required = true; 3797 3798 } else { 3799 if (get_seamless_boot_stream_count(context) == 0) 3800 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3801 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3802 } 3803 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space 3804 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) { 3805 /* 3806 * Workaround for firmware issue in some receivers where they don't pick up 3807 * correct output color space unless DP link is disabled/re-enabled 3808 */ 3809 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3810 } 3811 3812 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3813 bool should_program_abm = true; 3814 3815 // if otg funcs defined check if blanked before programming 3816 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3817 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3818 should_program_abm = false; 3819 3820 if (should_program_abm) { 3821 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3822 dc->hwss.set_abm_immediate_disable(pipe_ctx); 3823 } else { 3824 pipe_ctx->stream_res.abm->funcs->set_abm_level( 3825 pipe_ctx->stream_res.abm, stream->abm_level); 3826 } 3827 } 3828 } 3829 } 3830 } 3831 } 3832 3833 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3834 { 3835 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3836 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3837 && stream->ctx->dce_version >= DCN_VERSION_3_1) 3838 return true; 3839 3840 if (stream->link->replay_settings.config.replay_supported) 3841 return true; 3842 3843 if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level) 3844 return true; 3845 3846 return false; 3847 } 3848 3849 void dc_dmub_update_dirty_rect(struct dc *dc, 3850 int surface_count, 3851 struct dc_stream_state *stream, 3852 const struct dc_surface_update *srf_updates, 3853 struct dc_state *context) 3854 { 3855 union dmub_rb_cmd cmd; 3856 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3857 unsigned int i, j; 3858 unsigned int panel_inst = 0; 3859 3860 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3861 return; 3862 3863 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3864 return; 3865 3866 memset(&cmd, 0x0, sizeof(cmd)); 3867 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3868 cmd.update_dirty_rect.header.sub_type = 0; 3869 cmd.update_dirty_rect.header.payload_bytes = 3870 sizeof(cmd.update_dirty_rect) - 3871 sizeof(cmd.update_dirty_rect.header); 3872 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3873 for (i = 0; i < surface_count; i++) { 3874 struct dc_plane_state *plane_state = srf_updates[i].surface; 3875 const struct dc_flip_addrs *flip_addr = 
srf_updates[i].flip_addr; 3876 3877 if (!srf_updates[i].surface || !flip_addr) 3878 continue; 3879 /* Do not send in immediate flip mode */ 3880 if (srf_updates[i].surface->flip_immediate) 3881 continue; 3882 3883 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3884 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3885 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3886 sizeof(flip_addr->dirty_rects)); 3887 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3888 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3889 3890 if (pipe_ctx->stream != stream) 3891 continue; 3892 if (pipe_ctx->plane_state != plane_state) 3893 continue; 3894 3895 update_dirty_rect->panel_inst = panel_inst; 3896 update_dirty_rect->pipe_idx = j; 3897 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 3898 } 3899 } 3900 } 3901 3902 static void build_dmub_update_dirty_rect( 3903 struct dc *dc, 3904 int surface_count, 3905 struct dc_stream_state *stream, 3906 struct dc_surface_update *srf_updates, 3907 struct dc_state *context, 3908 struct dc_dmub_cmd dc_dmub_cmd[], 3909 unsigned int *dmub_cmd_count) 3910 { 3911 union dmub_rb_cmd cmd; 3912 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3913 unsigned int i, j; 3914 unsigned int panel_inst = 0; 3915 3916 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3917 return; 3918 3919 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3920 return; 3921 3922 memset(&cmd, 0x0, sizeof(cmd)); 3923 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3924 cmd.update_dirty_rect.header.sub_type = 0; 3925 cmd.update_dirty_rect.header.payload_bytes = 3926 sizeof(cmd.update_dirty_rect) - 3927 sizeof(cmd.update_dirty_rect.header); 3928 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3929 for (i = 0; i < surface_count; i++) { 3930 struct dc_plane_state *plane_state = srf_updates[i].surface; 3931 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3932 3933 if (!srf_updates[i].surface || !flip_addr) 3934 continue; 3935 /* Do not send in immediate flip mode */ 3936 if (srf_updates[i].surface->flip_immediate) 3937 continue; 3938 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3939 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3940 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3941 sizeof(flip_addr->dirty_rects)); 3942 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3943 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3944 3945 if (pipe_ctx->stream != stream) 3946 continue; 3947 if (pipe_ctx->plane_state != plane_state) 3948 continue; 3949 update_dirty_rect->panel_inst = panel_inst; 3950 update_dirty_rect->pipe_idx = j; 3951 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; 3952 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; 3953 (*dmub_cmd_count)++; 3954 } 3955 } 3956 } 3957 3958 static bool check_address_only_update(union surface_update_flags update_flags) 3959 { 3960 union surface_update_flags addr_only_update_flags; 3961 addr_only_update_flags.raw = 0; 3962 addr_only_update_flags.bits.addr_update = 1; 3963 3964 return update_flags.bits.addr_update && 3965 !(update_flags.raw & ~addr_only_update_flags.raw); 3966 } 3967 3968 /** 3969 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB 3970 * 3971 * @dc: Current DC state 3972 * @srf_updates: Array of surface updates 3973 * @surface_count: Number of surfaces that have an updated 
3974 * @stream: Corresponding stream to be updated in the current flip 3975 * @context: New DC state to be programmed 3976 * 3977 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB 3978 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array 3979 * 3980 * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required 3981 * to build an array of commands and have them sent while the OTG lock is acquired. 3982 * 3983 * Return: void 3984 */ 3985 static void build_dmub_cmd_list(struct dc *dc, 3986 struct dc_surface_update *srf_updates, 3987 int surface_count, 3988 struct dc_stream_state *stream, 3989 struct dc_state *context, 3990 struct dc_dmub_cmd dc_dmub_cmd[], 3991 unsigned int *dmub_cmd_count) 3992 { 3993 // Initialize cmd count to 0 3994 *dmub_cmd_count = 0; 3995 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); 3996 } 3997 3998 static void commit_plane_for_stream_offload_fams2_flip(struct dc *dc, 3999 struct dc_surface_update *srf_updates, 4000 int surface_count, 4001 struct dc_stream_state *stream, 4002 struct dc_state *context) 4003 { 4004 int i, j; 4005 4006 /* update dirty rect for PSR */ 4007 dc_dmub_update_dirty_rect(dc, surface_count, stream, 4008 srf_updates, context); 4009 4010 /* Perform requested Updates */ 4011 for (i = 0; i < surface_count; i++) { 4012 struct dc_plane_state *plane_state = srf_updates[i].surface; 4013 4014 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4015 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4016 4017 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 4018 continue; 4019 4020 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4021 continue; 4022 4023 /* update pipe context for plane */ 4024 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 4025 dc->hwss.update_plane_addr(dc, pipe_ctx); 4026 } 4027 } 4028 4029 /* Send commands to DMCUB */ 4030 dc_dmub_srv_fams2_passthrough_flip(dc, 4031 context, 4032 stream, 4033 srf_updates, 4034 surface_count); 4035 } 4036 4037 static void commit_planes_for_stream_fast(struct dc *dc, 4038 struct dc_surface_update *srf_updates, 4039 int surface_count, 4040 struct dc_stream_state *stream, 4041 struct dc_stream_update *stream_update, 4042 enum surface_update_type update_type, 4043 struct dc_state *context) 4044 { 4045 int i, j; 4046 struct pipe_ctx *top_pipe_to_program = NULL; 4047 struct dc_stream_status *stream_status = NULL; 4048 bool should_offload_fams2_flip = false; 4049 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 4050 4051 if (should_lock_all_pipes) 4052 determine_pipe_unlock_order(dc, context); 4053 4054 if (dc->debug.fams2_config.bits.enable && 4055 dc->debug.fams2_config.bits.enable_offload_flip && 4056 dc_state_is_fams2_in_use(dc, context)) { 4057 /* if not offloading to HWFQ, offload to FAMS2 if needed */ 4058 should_offload_fams2_flip = true; 4059 for (i = 0; i < surface_count; i++) { 4060 if (srf_updates[i].surface && 4061 srf_updates[i].surface->update_flags.raw && 4062 !check_address_only_update(srf_updates[i].surface->update_flags)) { 4063 /* more than address update, need to acquire FAMS2 lock */ 4064 should_offload_fams2_flip = false; 4065 break; 4066 } 4067 } 4068 if (stream_update) { 4069 /* more than address update, need to acquire FAMS2 lock */ 4070 should_offload_fams2_flip = false; 4071 } 4072 } 4073 4074 dc_exit_ips_for_hw_access(dc); 4075 4076 dc_z10_restore(dc); 4077 4078 top_pipe_to_program 
= resource_get_otg_master_for_stream( 4079 &context->res_ctx, 4080 stream); 4081 4082 if (!top_pipe_to_program) 4083 return; 4084 4085 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4086 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 4087 4088 if (pipe->stream && pipe->plane_state) { 4089 if (!dc->debug.using_dml2) 4090 set_p_state_switch_method(dc, context, pipe); 4091 4092 if (dc->debug.visual_confirm) 4093 dc_update_visual_confirm_color(dc, context, pipe); 4094 } 4095 } 4096 4097 for (i = 0; i < surface_count; i++) { 4098 struct dc_plane_state *plane_state = srf_updates[i].surface; 4099 /*set logical flag for lock/unlock use*/ 4100 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4101 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4102 4103 if (!pipe_ctx->plane_state) 4104 continue; 4105 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4106 continue; 4107 4108 pipe_ctx->plane_state->triplebuffer_flips = false; 4109 if (update_type == UPDATE_TYPE_FAST && 4110 dc->hwss.program_triplebuffer != NULL && 4111 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 4112 /*triple buffer for VUpdate only*/ 4113 pipe_ctx->plane_state->triplebuffer_flips = true; 4114 } 4115 } 4116 } 4117 4118 stream_status = dc_state_get_stream_status(context, stream); 4119 4120 if (should_offload_fams2_flip) { 4121 commit_plane_for_stream_offload_fams2_flip(dc, 4122 srf_updates, 4123 surface_count, 4124 stream, 4125 context); 4126 } else if (stream_status) { 4127 build_dmub_cmd_list(dc, 4128 srf_updates, 4129 surface_count, 4130 stream, 4131 context, 4132 context->dc_dmub_cmd, 4133 &(context->dmub_cmd_count)); 4134 hwss_build_fast_sequence(dc, 4135 context->dc_dmub_cmd, 4136 context->dmub_cmd_count, 4137 context->block_sequence, 4138 &(context->block_sequence_steps), 4139 top_pipe_to_program, 4140 stream_status, 4141 context); 4142 hwss_execute_sequence(dc, 4143 context->block_sequence, 4144 context->block_sequence_steps); 4145 } 4146 4147 /* Clear update flags so next flip doesn't have redundant programming 4148 * (if there's no stream update, the update flags are not cleared). 4149 * Surface updates are cleared unconditionally at the beginning of each flip, 4150 * so no need to clear here. 
4151 */ 4152 if (top_pipe_to_program->stream) 4153 top_pipe_to_program->stream->update_flags.raw = 0; 4154 } 4155 4156 static void commit_planes_for_stream(struct dc *dc, 4157 const struct dc_surface_update *srf_updates, 4158 int surface_count, 4159 struct dc_stream_state *stream, 4160 struct dc_stream_update *stream_update, 4161 enum surface_update_type update_type, 4162 struct dc_state *context) 4163 { 4164 int i, j; 4165 struct pipe_ctx *top_pipe_to_program = NULL; 4166 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 4167 bool subvp_prev_use = false; 4168 bool subvp_curr_use = false; 4169 uint8_t current_stream_mask = 0; 4170 4171 if (should_lock_all_pipes) 4172 determine_pipe_unlock_order(dc, context); 4173 // Once we apply the new subvp context to hardware it won't be in the 4174 // dc->current_state anymore, so we have to cache it before we apply 4175 // the new SubVP context 4176 subvp_prev_use = false; 4177 dc_exit_ips_for_hw_access(dc); 4178 4179 dc_z10_restore(dc); 4180 if (update_type == UPDATE_TYPE_FULL && dc->optimized_required) 4181 hwss_process_outstanding_hw_updates(dc, dc->current_state); 4182 4183 if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming) 4184 dc->res_pool->funcs->prepare_mcache_programming(dc, context); 4185 4186 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4187 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 4188 4189 if (pipe->stream && pipe->plane_state) { 4190 if (!dc->debug.using_dml2) 4191 set_p_state_switch_method(dc, context, pipe); 4192 4193 if (dc->debug.visual_confirm) 4194 dc_update_visual_confirm_color(dc, context, pipe); 4195 } 4196 } 4197 4198 if (update_type == UPDATE_TYPE_FULL) { 4199 dc_allow_idle_optimizations(dc, false); 4200 4201 if (get_seamless_boot_stream_count(context) == 0) 4202 dc->hwss.prepare_bandwidth(dc, context); 4203 4204 if (dc->hwss.update_dsc_pg) 4205 dc->hwss.update_dsc_pg(dc, context, false); 4206 4207 context_clock_trace(dc, context); 4208 } 4209 4210 if (update_type == UPDATE_TYPE_FULL) 4211 hwss_wait_for_outstanding_hw_updates(dc, dc->current_state); 4212 4213 top_pipe_to_program = resource_get_otg_master_for_stream( 4214 &context->res_ctx, 4215 stream); 4216 ASSERT(top_pipe_to_program != NULL); 4217 4218 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4219 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4220 4221 // Check old context for SubVP 4222 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 4223 if (subvp_prev_use) 4224 break; 4225 } 4226 4227 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4228 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 4229 4230 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 4231 subvp_curr_use = true; 4232 break; 4233 } 4234 } 4235 4236 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 4237 struct pipe_ctx *mpcc_pipe; 4238 struct pipe_ctx *odm_pipe; 4239 4240 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 4241 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 4242 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 4243 } 4244 4245 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4246 if (top_pipe_to_program && 4247 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 4248 if (should_use_dmub_inbox1_lock(dc, stream->link)) { 4249 union dmub_hw_lock_flags hw_locks = { 0 }; 4250 struct dmub_hw_lock_inst_flags inst_flags 
= { 0 }; 4251 4252 hw_locks.bits.lock_dig = 1; 4253 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 4254 4255 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 4256 true, 4257 &hw_locks, 4258 &inst_flags); 4259 } else 4260 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 4261 top_pipe_to_program->stream_res.tg); 4262 } 4263 4264 if (dc->hwss.wait_for_dcc_meta_propagation) { 4265 dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program); 4266 } 4267 4268 if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) 4269 dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type < UPDATE_TYPE_FULL); 4270 4271 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4272 if (dc->hwss.subvp_pipe_control_lock) 4273 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 4274 4275 if (dc->hwss.dmub_hw_control_lock) 4276 dc->hwss.dmub_hw_control_lock(dc, context, true); 4277 4278 dc->hwss.interdependent_update_lock(dc, context, true); 4279 } else { 4280 if (dc->hwss.subvp_pipe_control_lock) 4281 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 4282 4283 if (dc->hwss.dmub_hw_control_lock) 4284 dc->hwss.dmub_hw_control_lock(dc, context, true); 4285 4286 /* Lock the top pipe while updating plane addrs, since freesync requires 4287 * plane addr update event triggers to be synchronized. 4288 * top_pipe_to_program is expected to never be NULL 4289 */ 4290 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 4291 } 4292 4293 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 4294 4295 // Stream updates 4296 if (stream_update) 4297 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 4298 4299 if (surface_count == 0) { 4300 /* 4301 * In case of turning off screen, no need to program front end a second time. 4302 * just return after program blank. 4303 */ 4304 if (dc->hwss.apply_ctx_for_surface) 4305 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 4306 if (dc->hwss.program_front_end_for_ctx) 4307 dc->hwss.program_front_end_for_ctx(dc, context); 4308 4309 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4310 dc->hwss.interdependent_update_lock(dc, context, false); 4311 } else { 4312 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 4313 } 4314 dc->hwss.post_unlock_program_front_end(dc, context); 4315 4316 if (update_type != UPDATE_TYPE_FAST) 4317 if (dc->hwss.commit_subvp_config) 4318 dc->hwss.commit_subvp_config(dc, context); 4319 4320 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 4321 * move the SubVP lock to after the phantom pipes have been setup 4322 */ 4323 if (dc->hwss.subvp_pipe_control_lock) 4324 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, 4325 NULL, subvp_prev_use); 4326 4327 if (dc->hwss.dmub_hw_control_lock) 4328 dc->hwss.dmub_hw_control_lock(dc, context, false); 4329 return; 4330 } 4331 4332 if (update_type != UPDATE_TYPE_FAST) { 4333 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4334 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4335 4336 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || 4337 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && 4338 pipe_ctx->stream && pipe_ctx->plane_state) { 4339 /* Only update visual confirm for SUBVP and Mclk switching here. 
4340 * The bar appears on all pipes, so we need to update the bar on all displays, 4341 * so the information doesn't get stale. 4342 */ 4343 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, 4344 pipe_ctx->plane_res.hubp->inst); 4345 } 4346 } 4347 } 4348 4349 for (i = 0; i < surface_count; i++) { 4350 struct dc_plane_state *plane_state = srf_updates[i].surface; 4351 4352 /*set logical flag for lock/unlock use*/ 4353 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4354 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4355 if (!pipe_ctx->plane_state) 4356 continue; 4357 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4358 continue; 4359 pipe_ctx->plane_state->triplebuffer_flips = false; 4360 if (update_type == UPDATE_TYPE_FAST && 4361 dc->hwss.program_triplebuffer != NULL && 4362 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 4363 /*triple buffer for VUpdate only*/ 4364 pipe_ctx->plane_state->triplebuffer_flips = true; 4365 } 4366 } 4367 if (update_type == UPDATE_TYPE_FULL) { 4368 /* force vsync flip when reconfiguring pipes to prevent underflow */ 4369 plane_state->flip_immediate = false; 4370 plane_state->triplebuffer_flips = false; 4371 } 4372 } 4373 4374 // Update Type FULL, Surface updates 4375 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4376 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4377 4378 if (!pipe_ctx->top_pipe && 4379 !pipe_ctx->prev_odm_pipe && 4380 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 4381 struct dc_stream_status *stream_status = NULL; 4382 4383 if (!pipe_ctx->plane_state) 4384 continue; 4385 4386 /* Full fe update*/ 4387 if (update_type == UPDATE_TYPE_FAST) 4388 continue; 4389 4390 stream_status = 4391 stream_get_status(context, pipe_ctx->stream); 4392 4393 if (dc->hwss.apply_ctx_for_surface && stream_status) 4394 dc->hwss.apply_ctx_for_surface( 4395 dc, pipe_ctx->stream, stream_status->plane_count, context); 4396 } 4397 } 4398 4399 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4400 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4401 4402 if (!pipe_ctx->plane_state) 4403 continue; 4404 4405 /* Full fe update*/ 4406 if (update_type == UPDATE_TYPE_FAST) 4407 continue; 4408 4409 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 4410 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 4411 /*turn off triple buffer for full update*/ 4412 dc->hwss.program_triplebuffer( 4413 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 4414 } 4415 } 4416 4417 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 4418 dc->hwss.program_front_end_for_ctx(dc, context); 4419 4420 //Pipe busy until some frame and line # 4421 if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) { 4422 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4423 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4424 4425 dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx); 4426 } 4427 } 4428 4429 if (dc->debug.validate_dml_output) { 4430 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4431 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 4432 if (cur_pipe->stream == NULL) 4433 continue; 4434 4435 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 4436 cur_pipe->plane_res.hubp, dc->ctx, 4437 &context->res_ctx.pipe_ctx[i].rq_regs, 4438 &context->res_ctx.pipe_ctx[i].dlg_regs, 4439 &context->res_ctx.pipe_ctx[i].ttu_regs); 4440 } 4441 } 4442 } 4443 4444 // Update Type FAST, Surface updates 
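// In this fast path the per-pipe work below is limited to GSL flip control,
// an optional 3D LUT DMA load, triple-buffer programming, and the new surface address.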
4445 if (update_type == UPDATE_TYPE_FAST) { 4446 if (dc->hwss.set_flip_control_gsl) 4447 for (i = 0; i < surface_count; i++) { 4448 struct dc_plane_state *plane_state = srf_updates[i].surface; 4449 4450 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4451 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4452 4453 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 4454 continue; 4455 4456 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4457 continue; 4458 4459 // GSL has to be used for flip immediate 4460 dc->hwss.set_flip_control_gsl(pipe_ctx, 4461 pipe_ctx->plane_state->flip_immediate); 4462 } 4463 } 4464 4465 /* Perform requested Updates */ 4466 for (i = 0; i < surface_count; i++) { 4467 struct dc_plane_state *plane_state = srf_updates[i].surface; 4468 4469 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4470 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4471 4472 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 4473 continue; 4474 4475 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4476 continue; 4477 4478 if (srf_updates[i].cm2_params && 4479 srf_updates[i].cm2_params->cm2_luts.lut3d_data.lut3d_src == 4480 DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM && 4481 srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting == 4482 DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT && 4483 dc->hwss.trigger_3dlut_dma_load) 4484 dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx); 4485 4486 /*program triple buffer after lock based on flip type*/ 4487 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 4488 /*only enable triplebuffer for fast_update*/ 4489 dc->hwss.program_triplebuffer( 4490 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 4491 } 4492 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 4493 dc->hwss.update_plane_addr(dc, pipe_ctx); 4494 } 4495 } 4496 } 4497 4498 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4499 dc->hwss.interdependent_update_lock(dc, context, false); 4500 } else { 4501 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 4502 } 4503 4504 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4505 if (top_pipe_to_program && 4506 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 4507 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4508 top_pipe_to_program->stream_res.tg, 4509 CRTC_STATE_VACTIVE); 4510 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4511 top_pipe_to_program->stream_res.tg, 4512 CRTC_STATE_VBLANK); 4513 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4514 top_pipe_to_program->stream_res.tg, 4515 CRTC_STATE_VACTIVE); 4516 4517 if (should_use_dmub_inbox1_lock(dc, stream->link)) { 4518 union dmub_hw_lock_flags hw_locks = { 0 }; 4519 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 4520 4521 hw_locks.bits.lock_dig = 1; 4522 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 4523 4524 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 4525 false, 4526 &hw_locks, 4527 &inst_flags); 4528 } else 4529 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( 4530 top_pipe_to_program->stream_res.tg); 4531 } 4532 4533 if (subvp_curr_use) { 4534 /* If enabling subvp or transitioning from subvp->subvp, enable the 4535 * phantom streams before we program front end for the phantom pipes. 
4536 */ 4537 if (update_type != UPDATE_TYPE_FAST) { 4538 if (dc->hwss.enable_phantom_streams) 4539 dc->hwss.enable_phantom_streams(dc, context); 4540 } 4541 } 4542 4543 if (update_type != UPDATE_TYPE_FAST) 4544 dc->hwss.post_unlock_program_front_end(dc, context); 4545 4546 if (subvp_prev_use && !subvp_curr_use) { 4547 /* If disabling subvp, disable phantom streams after front end 4548 * programming has completed (we turn on phantom OTG in order 4549 * to complete the plane disable for phantom pipes). 4550 */ 4551 4552 if (dc->hwss.disable_phantom_streams) 4553 dc->hwss.disable_phantom_streams(dc, context); 4554 } 4555 4556 if (update_type != UPDATE_TYPE_FAST) 4557 if (dc->hwss.commit_subvp_config) 4558 dc->hwss.commit_subvp_config(dc, context); 4559 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 4560 * move the SubVP lock to after the phantom pipes have been setup 4561 */ 4562 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4563 if (dc->hwss.subvp_pipe_control_lock) 4564 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 4565 if (dc->hwss.dmub_hw_control_lock) 4566 dc->hwss.dmub_hw_control_lock(dc, context, false); 4567 } else { 4568 if (dc->hwss.subvp_pipe_control_lock) 4569 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 4570 if (dc->hwss.dmub_hw_control_lock) 4571 dc->hwss.dmub_hw_control_lock(dc, context, false); 4572 } 4573 4574 // Fire manual trigger only when bottom plane is flipped 4575 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4576 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4577 4578 if (!pipe_ctx->plane_state) 4579 continue; 4580 4581 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 4582 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || 4583 !pipe_ctx->plane_state->update_flags.bits.addr_update || 4584 pipe_ctx->plane_state->skip_manual_trigger) 4585 continue; 4586 4587 if (dc->hwss.program_cursor_offload_now) 4588 dc->hwss.program_cursor_offload_now(dc, pipe_ctx); 4589 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) 4590 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 4591 } 4592 4593 current_stream_mask = get_stream_mask(dc, context); 4594 if (current_stream_mask != context->stream_mask) { 4595 context->stream_mask = current_stream_mask; 4596 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask); 4597 } 4598 } 4599 4600 /** 4601 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change 4602 * 4603 * @dc: Used to get the current state status 4604 * @stream: Target stream, which we want to remove the attached planes 4605 * @srf_updates: Array of surface updates 4606 * @surface_count: Number of surface update 4607 * @is_plane_addition: [in] Fill out with true if it is a plane addition case 4608 * 4609 * DCN32x and newer support a feature named Dynamic ODM which can conflict with 4610 * the MPO if used simultaneously in some specific configurations (e.g., 4611 * 4k@144). This function checks if the incoming context requires applying a 4612 * transition state with unnecessary pipe splitting and ODM disabled to 4613 * circumvent our hardware limitations to prevent this edge case. If the OPP 4614 * associated with an MPCC might change due to plane additions, this function 4615 * returns true. 
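* For example, with enable_single_display_2to1_odm_policy set on a single active
* stream, going from two planes to three is flagged as a plane addition that
* needs this minimal transition.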
4616 *
4617 * Return:
4618 * Return true if OPP and MPCC might change; otherwise, return false.
4619 */
4620 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
4621 struct dc_stream_state *stream,
4622 struct dc_surface_update *srf_updates,
4623 int surface_count,
4624 bool *is_plane_addition)
4625 {
4626
4627 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
4628 bool force_minimal_pipe_splitting = false;
4629 bool subvp_active = false;
4630 uint32_t i;
4631
4632 *is_plane_addition = false;
4633
4634 if (cur_stream_status &&
4635 dc->current_state->stream_count > 0 &&
4636 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
4637 /* determine if minimal transition is required due to MPC */
4638 if (surface_count > 0) {
4639 if (cur_stream_status->plane_count > surface_count) {
4640 force_minimal_pipe_splitting = true;
4641 } else if (cur_stream_status->plane_count < surface_count) {
4642 force_minimal_pipe_splitting = true;
4643 *is_plane_addition = true;
4644 }
4645 }
4646 }
4647
4648 if (cur_stream_status &&
4649 dc->current_state->stream_count == 1 &&
4650 dc->debug.enable_single_display_2to1_odm_policy) {
4651 /* determine if minimal transition is required due to dynamic ODM */
4652 if (surface_count > 0) {
4653 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
4654 force_minimal_pipe_splitting = true;
4655 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
4656 force_minimal_pipe_splitting = true;
4657 *is_plane_addition = true;
4658 }
4659 }
4660 }
4661
4662 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4663 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4664
4665 if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
4666 subvp_active = true;
4667 break;
4668 }
4669 }
4670
4671 /* For SubVP when adding or removing planes we need to add a minimal transition
4672 * (even when disabling all planes). Whenever disabling a phantom pipe, we
4673 * must use the minimal transition path to disable the pipe correctly.
4674 *
4675 * We want to use the minimal transition whenever subvp is active, not only if
4676 * a plane is being added / removed from a subvp stream (an MPO plane can be added
4677 * to a DRR pipe of a SubVP + DRR config, in which case we still want to run through
4678 * a min transition to disable subvp).
4679 */ 4680 if (cur_stream_status && subvp_active) { 4681 /* determine if minimal transition is required due to SubVP*/ 4682 if (cur_stream_status->plane_count > surface_count) { 4683 force_minimal_pipe_splitting = true; 4684 } else if (cur_stream_status->plane_count < surface_count) { 4685 force_minimal_pipe_splitting = true; 4686 *is_plane_addition = true; 4687 } 4688 } 4689 4690 return force_minimal_pipe_splitting; 4691 } 4692 4693 4694 static void release_minimal_transition_state(struct dc *dc, 4695 struct dc_state *minimal_transition_context, 4696 struct dc_state *base_context, 4697 struct pipe_split_policy_backup *policy) 4698 { 4699 restore_minimal_pipe_split_policy(dc, base_context, policy); 4700 dc_state_release(minimal_transition_context); 4701 } 4702 4703 static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context) 4704 { 4705 uint8_t i; 4706 int j; 4707 struct dc_stream_status *stream_status; 4708 4709 for (i = 0; i < context->stream_count; i++) { 4710 stream_status = &context->stream_status[i]; 4711 4712 for (j = 0; j < stream_status->plane_count; j++) 4713 stream_status->plane_states[j]->flip_immediate = false; 4714 } 4715 } 4716 4717 static struct dc_state *create_minimal_transition_state(struct dc *dc, 4718 struct dc_state *base_context, struct pipe_split_policy_backup *policy) 4719 { 4720 struct dc_state *minimal_transition_context = NULL; 4721 4722 minimal_transition_context = dc_state_create_copy(base_context); 4723 if (!minimal_transition_context) 4724 return NULL; 4725 4726 backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); 4727 /* commit minimal state */ 4728 if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, 4729 DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) { 4730 /* prevent underflow and corruption when reconfiguring pipes */ 4731 force_vsync_flip_in_minimal_transition_context(minimal_transition_context); 4732 } else { 4733 /* 4734 * This should never happen, minimal transition state should 4735 * always be validated first before adding pipe split features. 4736 */ 4737 release_minimal_transition_state(dc, minimal_transition_context, base_context, policy); 4738 BREAK_TO_DEBUGGER(); 4739 minimal_transition_context = NULL; 4740 } 4741 return minimal_transition_context; 4742 } 4743 4744 static bool is_pipe_topology_transition_seamless_with_intermediate_step( 4745 struct dc *dc, 4746 struct dc_state *initial_state, 4747 struct dc_state *intermediate_state, 4748 struct dc_state *final_state) 4749 { 4750 return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state, 4751 intermediate_state) && 4752 dc->hwss.is_pipe_topology_transition_seamless(dc, 4753 intermediate_state, final_state); 4754 } 4755 4756 static void swap_and_release_current_context(struct dc *dc, 4757 struct dc_state *new_context, struct dc_stream_state *stream) 4758 { 4759 4760 int i; 4761 struct dc_state *old = dc->current_state; 4762 struct pipe_ctx *pipe_ctx; 4763 4764 /* Since memory free requires elevated IRQ, an interrupt 4765 * request is generated by mem free. If this happens 4766 * between freeing and reassigning the context, our vsync 4767 * interrupt will call into dc and cause a memory 4768 * corruption. Hence, we first reassign the context, 4769 * then free the old context. 
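* Note that dc_state_release() only drops a reference here; the old state is
* actually freed once its last reference is released.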
4770 */ 4771 dc->current_state = new_context; 4772 dc_state_release(old); 4773 4774 // clear any forced full updates 4775 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4776 pipe_ctx = &new_context->res_ctx.pipe_ctx[i]; 4777 4778 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4779 pipe_ctx->plane_state->force_full_update = false; 4780 } 4781 } 4782 4783 static int initialize_empty_surface_updates( 4784 struct dc_stream_state *stream, 4785 struct dc_surface_update *srf_updates) 4786 { 4787 struct dc_stream_status *status = dc_stream_get_status(stream); 4788 int i; 4789 4790 if (!status) 4791 return 0; 4792 4793 for (i = 0; i < status->plane_count; i++) 4794 srf_updates[i].surface = status->plane_states[i]; 4795 4796 return status->plane_count; 4797 } 4798 4799 static bool commit_minimal_transition_based_on_new_context(struct dc *dc, 4800 struct dc_state *new_context, 4801 struct dc_stream_state *stream, 4802 struct dc_stream_update *stream_update, 4803 struct dc_surface_update *srf_updates, 4804 int surface_count) 4805 { 4806 bool success = false; 4807 struct pipe_split_policy_backup policy; 4808 struct dc_state *intermediate_context = 4809 create_minimal_transition_state(dc, new_context, 4810 &policy); 4811 4812 if (intermediate_context) { 4813 if (is_pipe_topology_transition_seamless_with_intermediate_step( 4814 dc, 4815 dc->current_state, 4816 intermediate_context, 4817 new_context)) { 4818 DC_LOG_DC("commit minimal transition state: base = new state\n"); 4819 commit_planes_for_stream(dc, srf_updates, 4820 surface_count, stream, stream_update, 4821 UPDATE_TYPE_FULL, intermediate_context); 4822 swap_and_release_current_context( 4823 dc, intermediate_context, stream); 4824 dc_state_retain(dc->current_state); 4825 success = true; 4826 } 4827 release_minimal_transition_state( 4828 dc, intermediate_context, new_context, &policy); 4829 } 4830 return success; 4831 } 4832 4833 static bool commit_minimal_transition_based_on_current_context(struct dc *dc, 4834 struct dc_state *new_context, struct dc_stream_state *stream) 4835 { 4836 bool success = false; 4837 struct pipe_split_policy_backup policy; 4838 struct dc_state *intermediate_context; 4839 struct dc_state *old_current_state = dc->current_state; 4840 struct dc_surface_update srf_updates[MAX_SURFACES] = {0}; 4841 int surface_count; 4842 4843 /* 4844 * Both current and new contexts share the same stream and plane state 4845 * pointers. When new context is validated, stream and planes get 4846 * populated with new updates such as new plane addresses. This makes 4847 * the current context no longer valid because stream and planes are 4848 * modified from the original. We backup current stream and plane states 4849 * into scratch space whenever we are populating new context. So we can 4850 * restore the original values back by calling the restore function now. 4851 * This restores back the original stream and plane states associated 4852 * with the current state. 
4853 */ 4854 restore_planes_and_stream_state(&dc->scratch.current_state, stream); 4855 dc_state_retain(old_current_state); 4856 intermediate_context = create_minimal_transition_state(dc, 4857 old_current_state, &policy); 4858 4859 if (intermediate_context) { 4860 if (is_pipe_topology_transition_seamless_with_intermediate_step( 4861 dc, 4862 dc->current_state, 4863 intermediate_context, 4864 new_context)) { 4865 DC_LOG_DC("commit minimal transition state: base = current state\n"); 4866 surface_count = initialize_empty_surface_updates( 4867 stream, srf_updates); 4868 commit_planes_for_stream(dc, srf_updates, 4869 surface_count, stream, NULL, 4870 UPDATE_TYPE_FULL, intermediate_context); 4871 swap_and_release_current_context( 4872 dc, intermediate_context, stream); 4873 dc_state_retain(dc->current_state); 4874 success = true; 4875 } 4876 release_minimal_transition_state(dc, intermediate_context, 4877 old_current_state, &policy); 4878 } 4879 dc_state_release(old_current_state); 4880 /* 4881 * Restore stream and plane states back to the values associated with 4882 * new context. 4883 */ 4884 restore_planes_and_stream_state(&dc->scratch.new_state, stream); 4885 return success; 4886 } 4887 4888 /** 4889 * commit_minimal_transition_state_in_dc_update - Commit a minimal state based 4890 * on current or new context 4891 * 4892 * @dc: DC structure, used to get the current state 4893 * @new_context: New context 4894 * @stream: Stream getting the update for the flip 4895 * @srf_updates: Surface updates 4896 * @surface_count: Number of surfaces 4897 * 4898 * The function takes in current state and new state and determine a minimal 4899 * transition state as the intermediate step which could make the transition 4900 * between current and new states seamless. If found, it will commit the minimal 4901 * transition state and update current state to this minimal transition state 4902 * and return true, if not, it will return false. 4903 * 4904 * Return: 4905 * Return True if the minimal transition succeeded, false otherwise 4906 */ 4907 static bool commit_minimal_transition_state_in_dc_update(struct dc *dc, 4908 struct dc_state *new_context, 4909 struct dc_stream_state *stream, 4910 struct dc_surface_update *srf_updates, 4911 int surface_count) 4912 { 4913 bool success = commit_minimal_transition_based_on_new_context( 4914 dc, new_context, stream, NULL, 4915 srf_updates, surface_count); 4916 if (!success) 4917 success = commit_minimal_transition_based_on_current_context(dc, 4918 new_context, stream); 4919 if (!success) 4920 DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n"); 4921 return success; 4922 } 4923 4924 /** 4925 * commit_minimal_transition_state - Create a transition pipe split state 4926 * 4927 * @dc: Used to get the current state status 4928 * @transition_base_context: New transition state 4929 * 4930 * In some specific configurations, such as pipe split on multi-display with 4931 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe 4932 * programming when moving to new planes. To mitigate those types of problems, 4933 * this function adds a transition state that minimizes pipe usage before 4934 * programming the new configuration. When adding a new plane, the current 4935 * state requires the least pipes, so it is applied without splitting. When 4936 * removing a plane, the new state requires the least pipes, so it is applied 4937 * without splitting. 
4938 *
4939 * Return:
4940 * Return false if something is wrong in the transition state.
4941 */
4942 static bool commit_minimal_transition_state(struct dc *dc,
4943 struct dc_state *transition_base_context)
4944 {
4945 struct dc_state *transition_context;
4946 struct pipe_split_policy_backup policy;
4947 enum dc_status ret = DC_ERROR_UNEXPECTED;
4948 unsigned int i, j;
4949 unsigned int pipe_in_use = 0;
4950 bool subvp_in_use = false;
4951 bool odm_in_use = false;
4952
4953 /* check current pipes in use */
4954 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4955 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4956
4957 if (pipe->plane_state)
4958 pipe_in_use++;
4959 }
4960
4961 /* If SubVP is enabled and we are adding or removing planes from any main subvp
4962 * pipe, we must use the minimal transition.
4963 */
4964 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4965 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4966
4967 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
4968 subvp_in_use = true;
4969 break;
4970 }
4971 }
4972
4973 /* If ODM is enabled and we are adding or removing planes from any ODM
4974 * pipe, we must use the minimal transition.
4975 */
4976 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4977 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4978
4979 if (resource_is_pipe_type(pipe, OTG_MASTER)) {
4980 odm_in_use = resource_get_odm_slice_count(pipe) > 1;
4981 break;
4982 }
4983 }
4984
4985 /* When the OS adds a new surface while all pipes are already in use by ODM combine
4986 * and MPC split, we need commit_minimal_transition_state to transition safely.
4987 * After the OS exits MPO, ODM and MPC split go back to using all of the pipes,
4988 * so we need to call it again. Otherwise return true to skip.
4989 *
4990 * This reduces the scenarios that use dc_commit_state_no_check at flip time,
4991 * especially when entering/exiting MPO while DCN still has enough resources.
4992 */
4993 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
4994 return true;
4995
4996 DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
4997 dc->current_state == transition_base_context ? "current" : "new",
4998 subvp_in_use ? "Subvp In Use" :
4999 odm_in_use ? "ODM in Use" :
5000 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ?
"MPC in Use" : 5001 "Unknown"); 5002 5003 dc_state_retain(transition_base_context); 5004 transition_context = create_minimal_transition_state(dc, 5005 transition_base_context, &policy); 5006 if (transition_context) { 5007 ret = dc_commit_state_no_check(dc, transition_context); 5008 release_minimal_transition_state(dc, transition_context, transition_base_context, &policy); 5009 } 5010 dc_state_release(transition_base_context); 5011 5012 if (ret != DC_OK) { 5013 /* this should never happen */ 5014 BREAK_TO_DEBUGGER(); 5015 return false; 5016 } 5017 5018 /* force full surface update */ 5019 for (i = 0; i < dc->current_state->stream_count; i++) { 5020 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { 5021 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; 5022 } 5023 } 5024 5025 return true; 5026 } 5027 5028 void populate_fast_updates(struct dc_fast_update *fast_update, 5029 struct dc_surface_update *srf_updates, 5030 int surface_count, 5031 struct dc_stream_update *stream_update) 5032 { 5033 int i = 0; 5034 5035 if (stream_update) { 5036 fast_update[0].out_transfer_func = stream_update->out_transfer_func; 5037 fast_update[0].output_csc_transform = stream_update->output_csc_transform; 5038 } else { 5039 fast_update[0].out_transfer_func = NULL; 5040 fast_update[0].output_csc_transform = NULL; 5041 } 5042 5043 for (i = 0; i < surface_count; i++) { 5044 fast_update[i].flip_addr = srf_updates[i].flip_addr; 5045 fast_update[i].gamma = srf_updates[i].gamma; 5046 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; 5047 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; 5048 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; 5049 fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix; 5050 } 5051 } 5052 5053 static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count) 5054 { 5055 int i; 5056 5057 if (fast_update[0].out_transfer_func || 5058 fast_update[0].output_csc_transform) 5059 return true; 5060 5061 for (i = 0; i < surface_count; i++) { 5062 if (fast_update[i].flip_addr || 5063 fast_update[i].gamma || 5064 fast_update[i].gamut_remap_matrix || 5065 fast_update[i].input_csc_color_matrix || 5066 fast_update[i].cursor_csc_color_matrix || 5067 fast_update[i].coeff_reduction_factor) 5068 return true; 5069 } 5070 5071 return false; 5072 } 5073 5074 bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count) 5075 { 5076 int i; 5077 5078 if (fast_update[0].out_transfer_func || 5079 fast_update[0].output_csc_transform) 5080 return true; 5081 5082 for (i = 0; i < surface_count; i++) { 5083 if (fast_update[i].input_csc_color_matrix || 5084 fast_update[i].gamma || 5085 fast_update[i].gamut_remap_matrix || 5086 fast_update[i].coeff_reduction_factor || 5087 fast_update[i].cursor_csc_color_matrix) 5088 return true; 5089 } 5090 5091 return false; 5092 } 5093 5094 static bool full_update_required_weak( 5095 const struct dc *dc, 5096 const struct dc_surface_update *srf_updates, 5097 int surface_count, 5098 const struct dc_stream_update *stream_update, 5099 const struct dc_stream_state *stream) 5100 { 5101 const struct dc_state *context = dc->current_state; 5102 if (srf_updates) 5103 for (int i = 0; i < surface_count; i++) 5104 if (!is_surface_in_context(context, srf_updates[i].surface)) 5105 return true; 5106 5107 if (stream) { 5108 const struct dc_stream_status *stream_status = 
dc_stream_get_status_const(stream); 5109 if (stream_status == NULL || stream_status->plane_count != surface_count) 5110 return true; 5111 } 5112 if (dc->idle_optimizations_allowed) 5113 return true; 5114 5115 if (dc_can_clear_cursor_limit(dc)) 5116 return true; 5117 5118 return false; 5119 } 5120 5121 static bool full_update_required( 5122 const struct dc *dc, 5123 const struct dc_surface_update *srf_updates, 5124 int surface_count, 5125 const struct dc_stream_update *stream_update, 5126 const struct dc_stream_state *stream) 5127 { 5128 if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) 5129 return true; 5130 5131 for (int i = 0; i < surface_count; i++) { 5132 if (srf_updates && 5133 (srf_updates[i].plane_info || 5134 srf_updates[i].scaling_info || 5135 (srf_updates[i].hdr_mult.value && 5136 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || 5137 (srf_updates[i].sdr_white_level_nits && 5138 srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) || 5139 srf_updates[i].in_transfer_func || 5140 srf_updates[i].func_shaper || 5141 srf_updates[i].lut3d_func || 5142 srf_updates[i].surface->force_full_update || 5143 (srf_updates[i].flip_addr && 5144 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || 5145 (srf_updates[i].cm2_params && 5146 (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting || 5147 srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)))) 5148 return true; 5149 } 5150 5151 if (stream_update && 5152 (((stream_update->src.height != 0 && stream_update->src.width != 0) || 5153 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 5154 stream_update->integer_scaling_update) || 5155 stream_update->hdr_static_metadata || 5156 stream_update->abm_level || 5157 stream_update->periodic_interrupt || 5158 stream_update->vrr_infopacket || 5159 stream_update->vsc_infopacket || 5160 stream_update->vsp_infopacket || 5161 stream_update->hfvsif_infopacket || 5162 stream_update->vtem_infopacket || 5163 stream_update->adaptive_sync_infopacket || 5164 stream_update->avi_infopacket || 5165 stream_update->dpms_off || 5166 stream_update->allow_freesync || 5167 stream_update->vrr_active_variable || 5168 stream_update->vrr_active_fixed || 5169 stream_update->gamut_remap || 5170 stream_update->output_color_space || 5171 stream_update->dither_option || 5172 stream_update->wb_update || 5173 stream_update->dsc_config || 5174 stream_update->mst_bw_update || 5175 stream_update->func_shaper || 5176 stream_update->lut3d_func || 5177 stream_update->pending_test_pattern || 5178 stream_update->crtc_timing_adjust || 5179 stream_update->scaler_sharpener_update || 5180 stream_update->hw_cursor_req)) 5181 return true; 5182 5183 return false; 5184 } 5185 5186 static bool fast_update_only( 5187 const struct dc *dc, 5188 const struct dc_fast_update *fast_update, 5189 const struct dc_surface_update *srf_updates, 5190 int surface_count, 5191 const struct dc_stream_update *stream_update, 5192 const struct dc_stream_state *stream) 5193 { 5194 return fast_updates_exist(fast_update, surface_count) 5195 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); 5196 } 5197 5198 static bool update_planes_and_stream_v2(struct dc *dc, 5199 struct dc_surface_update *srf_updates, int surface_count, 5200 struct dc_stream_state *stream, 5201 struct 
dc_stream_update *stream_update) 5202 { 5203 struct dc_state *context; 5204 enum surface_update_type update_type; 5205 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 5206 5207 /* In cases where MPO and split or ODM are used transitions can 5208 * cause underflow. Apply stream configuration with minimal pipe 5209 * split first to avoid unsupported transitions for active pipes. 5210 */ 5211 bool force_minimal_pipe_splitting = 0; 5212 bool is_plane_addition = 0; 5213 bool is_fast_update_only; 5214 5215 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 5216 is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, 5217 surface_count, stream_update, stream); 5218 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( 5219 dc, 5220 stream, 5221 srf_updates, 5222 surface_count, 5223 &is_plane_addition); 5224 5225 /* on plane addition, minimal state is the current one */ 5226 if (force_minimal_pipe_splitting && is_plane_addition && 5227 !commit_minimal_transition_state(dc, dc->current_state)) 5228 return false; 5229 5230 if (!update_planes_and_stream_state( 5231 dc, 5232 srf_updates, 5233 surface_count, 5234 stream, 5235 stream_update, 5236 &update_type, 5237 &context)) 5238 return false; 5239 5240 /* on plane removal, minimal state is the new one */ 5241 if (force_minimal_pipe_splitting && !is_plane_addition) { 5242 if (!commit_minimal_transition_state(dc, context)) { 5243 dc_state_release(context); 5244 return false; 5245 } 5246 update_type = UPDATE_TYPE_FULL; 5247 } 5248 5249 if (dc->hwss.is_pipe_topology_transition_seamless && 5250 !dc->hwss.is_pipe_topology_transition_seamless( 5251 dc, dc->current_state, context)) 5252 commit_minimal_transition_state_in_dc_update(dc, context, stream, 5253 srf_updates, surface_count); 5254 5255 if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) { 5256 commit_planes_for_stream_fast(dc, 5257 srf_updates, 5258 surface_count, 5259 stream, 5260 stream_update, 5261 update_type, 5262 context); 5263 } else { 5264 if (!stream_update && 5265 dc->hwss.is_pipe_topology_transition_seamless && 5266 !dc->hwss.is_pipe_topology_transition_seamless( 5267 dc, dc->current_state, context)) { 5268 DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); 5269 BREAK_TO_DEBUGGER(); 5270 } 5271 commit_planes_for_stream( 5272 dc, 5273 srf_updates, 5274 surface_count, 5275 stream, 5276 stream_update, 5277 update_type, 5278 context); 5279 } 5280 if (dc->current_state != context) 5281 swap_and_release_current_context(dc, context, stream); 5282 return true; 5283 } 5284 5285 static void commit_planes_and_stream_update_on_current_context(struct dc *dc, 5286 struct dc_surface_update *srf_updates, int surface_count, 5287 struct dc_stream_state *stream, 5288 struct dc_stream_update *stream_update, 5289 enum surface_update_type update_type) 5290 { 5291 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 5292 5293 ASSERT(update_type < UPDATE_TYPE_FULL); 5294 populate_fast_updates(fast_update, srf_updates, surface_count, 5295 stream_update); 5296 if (fast_update_only(dc, fast_update, srf_updates, surface_count, 5297 stream_update, stream) && 5298 !dc->check_config.enable_legacy_fast_update) 5299 commit_planes_for_stream_fast(dc, 5300 srf_updates, 5301 surface_count, 5302 stream, 5303 stream_update, 5304 update_type, 5305 dc->current_state); 5306 else 5307 commit_planes_for_stream( 5308 dc, 5309 srf_updates, 5310 surface_count, 5311 stream, 5312 stream_update, 5313 
update_type, 5314 dc->current_state); 5315 } 5316 5317 static void commit_planes_and_stream_update_with_new_context(struct dc *dc, 5318 struct dc_surface_update *srf_updates, int surface_count, 5319 struct dc_stream_state *stream, 5320 struct dc_stream_update *stream_update, 5321 enum surface_update_type update_type, 5322 struct dc_state *new_context) 5323 { 5324 bool skip_new_context = false; 5325 ASSERT(update_type >= UPDATE_TYPE_FULL); 5326 /* 5327 * It is required by the feature design that all pipe topologies 5328 * using extra free pipes for power saving purposes such as 5329 * dynamic ODM or SubVp shall only be enabled when it can be 5330 * transitioned seamlessly to AND from its minimal transition 5331 * state. A minimal transition state is defined as the same dc 5332 * state but with all power saving features disabled. So it uses 5333 * the minimum pipe topology. When we can't seamlessly 5334 * transition from state A to state B, we will insert the 5335 * minimal transition state A' or B' in between so seamless 5336 * transition between A and B can be made possible. 5337 * 5338 * To optimize for the time it takes to execute flips, 5339 * the transition from the minimal state to the final state is 5340 * deferred until a steady state (no more transitions) is reached. 5341 */ 5342 if (!dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, new_context)) { 5343 if (!dc->debug.disable_deferred_minimal_transitions) { 5344 dc->check_config.deferred_transition_state = true; 5345 dc->check_config.transition_countdown_to_steady_state = 5346 dc->debug.num_fast_flips_to_steady_state_override ? 5347 dc->debug.num_fast_flips_to_steady_state_override : 5348 NUM_FAST_FLIPS_TO_STEADY_STATE; 5349 5350 if (commit_minimal_transition_based_on_new_context(dc, new_context, stream, stream_update, 5351 srf_updates, surface_count)) { 5352 skip_new_context = true; 5353 dc_state_release(new_context); 5354 new_context = dc->current_state; 5355 } else { 5356 /* 5357 * In this case a new mpo plane is being enabled on pipes that were 5358 * previously in use, and the surface update to the existing plane 5359 * includes an alpha box where the new plane will be, so the update 5360 * from minimal to final cannot be deferred as the alpha box would 5361 * be visible to the user 5362 */ 5363 commit_minimal_transition_based_on_current_context(dc, new_context, stream); 5364 } 5365 } else { 5366 commit_minimal_transition_state_in_dc_update(dc, new_context, stream, 5367 srf_updates, surface_count); 5368 } 5369 } else if (dc->check_config.deferred_transition_state) { 5370 /* reset countdown as steady state not reached */ 5371 dc->check_config.transition_countdown_to_steady_state = 5372 dc->debug.num_fast_flips_to_steady_state_override ? 
5373 dc->debug.num_fast_flips_to_steady_state_override : 5374 NUM_FAST_FLIPS_TO_STEADY_STATE; 5375 } 5376 5377 if (!skip_new_context) { 5378 commit_planes_for_stream(dc, srf_updates, surface_count, stream, stream_update, update_type, new_context); 5379 swap_and_release_current_context(dc, new_context, stream); 5380 } 5381 } 5382 5383 static bool update_planes_and_stream_v3(struct dc *dc, 5384 struct dc_surface_update *srf_updates, int surface_count, 5385 struct dc_stream_state *stream, 5386 struct dc_stream_update *stream_update) 5387 { 5388 struct dc_state *new_context; 5389 enum surface_update_type update_type; 5390 5391 /* 5392 * When this function returns true and new_context is not equal to 5393 * current state, the function allocates and validates a new dc state 5394 * and assigns it to new_context. The function expects that the caller 5395 * is responsible for freeing this memory when new_context is no longer 5396 * used. We swap the current context with the new one and free the current 5397 * one instead, so new_context's memory will live until the next full update 5398 * after it is replaced by a newer context. Refer to the use of 5399 * swap_and_release_current_context below. 5400 */ 5401 if (!update_planes_and_stream_state(dc, srf_updates, surface_count, 5402 stream, stream_update, &update_type, 5403 &new_context)) 5404 return false; 5405 5406 if (new_context == dc->current_state) { 5407 commit_planes_and_stream_update_on_current_context(dc, 5408 srf_updates, surface_count, stream, 5409 stream_update, update_type); 5410 5411 if (dc->check_config.transition_countdown_to_steady_state) 5412 dc->check_config.transition_countdown_to_steady_state--; 5413 } else { 5414 commit_planes_and_stream_update_with_new_context(dc, 5415 srf_updates, surface_count, stream, 5416 stream_update, update_type, new_context); 5417 } 5418 5419 return true; 5420 } 5421 5422 static void clear_update_flags(struct dc_surface_update *srf_updates, 5423 int surface_count, struct dc_stream_state *stream) 5424 { 5425 int i; 5426 5427 if (stream) 5428 stream->update_flags.raw = 0; 5429 5430 for (i = 0; i < surface_count; i++) 5431 if (srf_updates[i].surface) 5432 srf_updates[i].surface->update_flags.raw = 0; 5433 } 5434 5435 bool dc_update_planes_and_stream(struct dc *dc, 5436 struct dc_surface_update *srf_updates, int surface_count, 5437 struct dc_stream_state *stream, 5438 struct dc_stream_update *stream_update) 5439 { 5440 struct dc_update_scratch_space *scratch = dc_update_planes_and_stream_init( 5441 dc, 5442 srf_updates, 5443 surface_count, 5444 stream, 5445 stream_update 5446 ); 5447 bool more = true; 5448 5449 while (more) { 5450 if (!dc_update_planes_and_stream_prepare(scratch)) 5451 return false; 5452 5453 dc_update_planes_and_stream_execute(scratch); 5454 more = dc_update_planes_and_stream_cleanup(scratch); 5455 } 5456 return true; 5457 } 5458 5459 void dc_commit_updates_for_stream(struct dc *dc, 5460 struct dc_surface_update *srf_updates, 5461 int surface_count, 5462 struct dc_stream_state *stream, 5463 struct dc_stream_update *stream_update, 5464 struct dc_state *state) 5465 { 5466 bool ret = false; 5467 5468 dc_exit_ips_for_hw_access(dc); 5469 /* TODO: Since changing the commit sequence can have a huge impact, 5470 * we decided to only enable it for DCN3x. However, as soon as 5471 * we get more confident about this change we'll need to enable 5472 * the new sequence for all ASICs.
5473 */ 5474 if (dc->ctx->dce_version >= DCN_VERSION_4_01) { 5475 ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, 5476 stream, stream_update); 5477 } else { 5478 ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, 5479 stream, stream_update); 5480 } 5481 5482 if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2) 5483 clear_update_flags(srf_updates, surface_count, stream); 5484 } 5485 5486 uint8_t dc_get_current_stream_count(struct dc *dc) 5487 { 5488 return dc->current_state->stream_count; 5489 } 5490 5491 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) 5492 { 5493 if (i < dc->current_state->stream_count) 5494 return dc->current_state->streams[i]; 5495 return NULL; 5496 } 5497 5498 enum dc_irq_source dc_interrupt_to_irq_source( 5499 struct dc *dc, 5500 uint32_t src_id, 5501 uint32_t ext_id) 5502 { 5503 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 5504 } 5505 5506 /* 5507 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source 5508 */ 5509 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 5510 { 5511 5512 if (dc == NULL) 5513 return false; 5514 5515 return dal_irq_service_set(dc->res_pool->irqs, src, enable); 5516 } 5517 5518 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 5519 { 5520 dal_irq_service_ack(dc->res_pool->irqs, src); 5521 } 5522 5523 void dc_power_down_on_boot(struct dc *dc) 5524 { 5525 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 5526 dc->hwss.power_down_on_boot) { 5527 if (dc->caps.ips_support) 5528 dc_exit_ips_for_hw_access(dc); 5529 dc->hwss.power_down_on_boot(dc); 5530 } 5531 } 5532 5533 void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) 5534 { 5535 if (!dc->current_state) 5536 return; 5537 5538 switch (power_state) { 5539 case DC_ACPI_CM_POWER_STATE_D0: 5540 dc_state_construct(dc, dc->current_state); 5541 5542 dc_exit_ips_for_hw_access(dc); 5543 5544 dc_z10_restore(dc); 5545 5546 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); 5547 5548 dc->hwss.init_hw(dc); 5549 5550 if (dc->hwss.init_sys_ctx != NULL && 5551 dc->vm_pa_config.valid) { 5552 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); 5553 } 5554 break; 5555 case DC_ACPI_CM_POWER_STATE_D3: 5556 if (dc->caps.ips_support) 5557 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); 5558 5559 if (dc->caps.ips_v2_support) { 5560 if (dc->clk_mgr->funcs->set_low_power_state) 5561 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); 5562 } 5563 break; 5564 default: 5565 ASSERT(dc->current_state->stream_count == 0); 5566 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); 5567 5568 dc_state_destruct(dc->current_state); 5569 5570 break; 5571 } 5572 } 5573 5574 void dc_resume(struct dc *dc) 5575 { 5576 uint32_t i; 5577 5578 for (i = 0; i < dc->link_count; i++) 5579 dc->link_srv->resume(dc->links[i]); 5580 } 5581 5582 bool dc_is_dmcu_initialized(struct dc *dc) 5583 { 5584 struct dmcu *dmcu = dc->res_pool->dmcu; 5585 5586 if (dmcu) 5587 return dmcu->funcs->is_dmcu_initialized(dmcu); 5588 return false; 5589 } 5590 5591 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) 5592 { 5593 if (dc->hwss.set_clock) 5594 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); 5595 return DC_ERROR_UNEXPECTED; 5596 } 5597 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) 5598 { 5599 if 
(dc->hwss.get_clock) 5600 dc->hwss.get_clock(dc, clock_type, clock_cfg); 5601 } 5602 5603 /* enable/disable eDP PSR without specify stream for eDP */ 5604 bool dc_set_psr_allow_active(struct dc *dc, bool enable) 5605 { 5606 int i; 5607 bool allow_active; 5608 5609 for (i = 0; i < dc->current_state->stream_count ; i++) { 5610 struct dc_link *link; 5611 struct dc_stream_state *stream = dc->current_state->streams[i]; 5612 5613 link = stream->link; 5614 if (!link) 5615 continue; 5616 5617 if (link->psr_settings.psr_feature_enabled) { 5618 if (enable && !link->psr_settings.psr_allow_active) { 5619 allow_active = true; 5620 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) 5621 return false; 5622 } else if (!enable && link->psr_settings.psr_allow_active) { 5623 allow_active = false; 5624 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) 5625 return false; 5626 } 5627 } 5628 } 5629 5630 return true; 5631 } 5632 5633 /* enable/disable eDP Replay without specify stream for eDP */ 5634 bool dc_set_replay_allow_active(struct dc *dc, bool active) 5635 { 5636 int i; 5637 bool allow_active; 5638 5639 for (i = 0; i < dc->current_state->stream_count; i++) { 5640 struct dc_link *link; 5641 struct dc_stream_state *stream = dc->current_state->streams[i]; 5642 5643 link = stream->link; 5644 if (!link) 5645 continue; 5646 5647 if (link->replay_settings.replay_feature_enabled) { 5648 if (active && !link->replay_settings.replay_allow_active) { 5649 allow_active = true; 5650 if (!dc_link_set_replay_allow_active(link, &allow_active, 5651 false, false, NULL)) 5652 return false; 5653 } else if (!active && link->replay_settings.replay_allow_active) { 5654 allow_active = false; 5655 if (!dc_link_set_replay_allow_active(link, &allow_active, 5656 true, false, NULL)) 5657 return false; 5658 } 5659 } 5660 } 5661 5662 return true; 5663 } 5664 5665 /* set IPS disable state */ 5666 bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips) 5667 { 5668 dc_exit_ips_for_hw_access(dc); 5669 5670 dc->config.disable_ips = disable_ips; 5671 5672 return true; 5673 } 5674 5675 void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) 5676 { 5677 int idle_fclk_khz = 0, idle_dramclk_khz = 0, i = 0; 5678 enum mall_stream_type subvp_pipe_type[MAX_PIPES] = {0}; 5679 struct pipe_ctx *pipe = NULL; 5680 struct dc_state *context = dc->current_state; 5681 5682 if (dc->debug.disable_idle_power_optimizations) { 5683 DC_LOG_DEBUG("%s: disabled\n", __func__); 5684 return; 5685 } 5686 5687 if (allow != dc->idle_optimizations_allowed) 5688 DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, 5689 dc->idle_optimizations_allowed, allow, caller_name); 5690 5691 if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) 5692 return; 5693 5694 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) 5695 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) 5696 return; 5697 5698 if (allow == dc->idle_optimizations_allowed) 5699 return; 5700 5701 if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL && 5702 dc->hwss.apply_idle_power_optimizations(dc, allow)) { 5703 dc->idle_optimizations_allowed = allow; 5704 DC_LOG_DEBUG("%s: %s\n", __func__, allow ? 
"enabled" : "disabled"); 5705 } 5706 5707 // log idle clocks and sub vp pipe types at idle optimization time 5708 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_fclk) 5709 idle_fclk_khz = dc->clk_mgr->funcs->get_hard_min_fclk(dc->clk_mgr); 5710 5711 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk) 5712 idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr); 5713 5714 if (dc->res_pool && context) { 5715 for (i = 0; i < dc->res_pool->pipe_count; i++) { 5716 pipe = &context->res_ctx.pipe_ctx[i]; 5717 subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe); 5718 } 5719 } 5720 if (!dc->caps.is_apu) 5721 DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n", 5722 __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2], 5723 subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name); 5724 5725 } 5726 5727 void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) 5728 { 5729 if (dc->caps.ips_support) 5730 dc_allow_idle_optimizations_internal(dc, false, caller_name); 5731 } 5732 5733 bool dc_dmub_is_ips_idle_state(struct dc *dc) 5734 { 5735 if (dc->debug.disable_idle_power_optimizations) 5736 return false; 5737 5738 if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) 5739 return false; 5740 5741 if (!dc->ctx->dmub_srv) 5742 return false; 5743 5744 return dc->ctx->dmub_srv->idle_allowed; 5745 } 5746 5747 /* set min and max memory clock to lowest and highest DPM level, respectively */ 5748 void dc_unlock_memory_clock_frequency(struct dc *dc) 5749 { 5750 if (dc->clk_mgr->funcs->set_hard_min_memclk) 5751 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false); 5752 5753 if (dc->clk_mgr->funcs->set_hard_max_memclk) 5754 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 5755 } 5756 5757 /* set min memory clock to the min required for current mode, max to maxDPM */ 5758 void dc_lock_memory_clock_frequency(struct dc *dc) 5759 { 5760 if (dc->clk_mgr->funcs->get_memclk_states_from_smu) 5761 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); 5762 5763 if (dc->clk_mgr->funcs->set_hard_min_memclk) 5764 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true); 5765 5766 if (dc->clk_mgr->funcs->set_hard_max_memclk) 5767 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 5768 } 5769 5770 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) 5771 { 5772 struct dc_state *context = dc->current_state; 5773 struct hubp *hubp; 5774 struct pipe_ctx *pipe; 5775 int i; 5776 5777 for (i = 0; i < dc->res_pool->pipe_count; i++) { 5778 pipe = &context->res_ctx.pipe_ctx[i]; 5779 5780 if (pipe->stream != NULL) { 5781 dc->hwss.disable_pixel_data(dc, pipe, true); 5782 5783 // wait for double buffer 5784 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 5785 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); 5786 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 5787 5788 hubp = pipe->plane_res.hubp; 5789 hubp->funcs->set_blank_regs(hubp, true); 5790 } 5791 } 5792 if (dc->clk_mgr->funcs->set_max_memclk) 5793 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); 5794 if (dc->clk_mgr->funcs->set_min_memclk) 5795 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); 5796 5797 for (i = 0; i < 
dc->res_pool->pipe_count; i++) { 5798 pipe = &context->res_ctx.pipe_ctx[i]; 5799 5800 if (pipe->stream != NULL) { 5801 dc->hwss.disable_pixel_data(dc, pipe, false); 5802 5803 hubp = pipe->plane_res.hubp; 5804 hubp->funcs->set_blank_regs(hubp, false); 5805 } 5806 } 5807 } 5808 5809 5810 /** 5811 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode 5812 * @dc: pointer to dc of the dm calling this 5813 * @enable: True = transition to DC mode, false = transition back to AC mode 5814 * 5815 * Some SoCs define additional clock limits when in DC mode. DM should 5816 * invoke this function when the platform undergoes a power source transition 5817 * so DC can apply/unapply the limit. This interface may be disruptive to 5818 * the onscreen content. 5819 * 5820 * Context: Triggered by OS through DM interface, or manually by escape calls. 5821 * Need to hold a dc lock when doing so. 5822 * 5823 * Return: none (void function) 5824 * 5825 */ 5826 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) 5827 { 5828 unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i; 5829 bool p_state_change_support; 5830 5831 if (!dc->config.dc_mode_clk_limit_support) 5832 return; 5833 5834 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; 5835 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) { 5836 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM) 5837 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; 5838 } 5839 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; 5840 p_state_change_support = dc->clk_mgr->clks.p_state_change_support; 5841 5842 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { 5843 if (p_state_change_support) { 5844 if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk) 5845 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); 5846 // else: No-Op 5847 } else { 5848 if (funcMin <= softMax) 5849 blank_and_force_memclk(dc, true, softMax); 5850 // else: No-Op 5851 } 5852 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { 5853 if (p_state_change_support) { 5854 if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk) 5855 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); 5856 // else: No-Op 5857 } else { 5858 if (funcMin <= softMax) 5859 blank_and_force_memclk(dc, true, maxDPM); 5860 // else: No-Op 5861 } 5862 } 5863 dc->clk_mgr->dc_mode_softmax_enabled = enable; 5864 } 5865 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, 5866 unsigned int pitch, 5867 unsigned int height, 5868 enum surface_pixel_format format, 5869 struct dc_cursor_attributes *cursor_attr) 5870 { 5871 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr)) 5872 return true; 5873 return false; 5874 } 5875 5876 /* cleanup on driver unload */ 5877 void dc_hardware_release(struct dc *dc) 5878 { 5879 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc); 5880 5881 if (dc->hwss.hardware_release) 5882 dc->hwss.hardware_release(dc); 5883 } 5884 5885 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) 5886 { 5887 if (dc->current_state) 5888 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true; 5889 } 5890 5891 /** 5892 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications 5893 * 5894 * @dc: [in] dc structure 5895 * 5896 * Checks whether DMUB FW supports outbox notifications. If supported, DM 5897 * should register the outbox interrupt prior to actually enabling interrupts 5898 *
via dc_enable_dmub_outbox 5899 * 5900 * Return: 5901 * True if DMUB FW supports outbox notifications, False otherwise 5902 */ 5903 bool dc_is_dmub_outbox_supported(struct dc *dc) 5904 { 5905 if (!dc->caps.dmcub_support) 5906 return false; 5907 5908 switch (dc->ctx->asic_id.chip_family) { 5909 5910 case FAMILY_YELLOW_CARP: 5911 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 5912 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 5913 !dc->debug.dpia_debug.bits.disable_dpia) 5914 return true; 5915 break; 5916 5917 case AMDGPU_FAMILY_GC_11_0_1: 5918 case AMDGPU_FAMILY_GC_11_5_0: 5919 if (!dc->debug.dpia_debug.bits.disable_dpia) 5920 return true; 5921 break; 5922 5923 default: 5924 break; 5925 } 5926 5927 /* dmub aux needs dmub notifications to be enabled */ 5928 return dc->debug.enable_dmub_aux_for_legacy_ddc; 5929 5930 } 5931 5932 /** 5933 * dc_enable_dmub_notifications - Check if dmub fw supports outbox 5934 * 5935 * @dc: [in] dc structure 5936 * 5937 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox 5938 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This 5939 * API shall be removed after switching. 5940 * 5941 * Return: 5942 * True if DMUB FW supports outbox notifications, False otherwise 5943 */ 5944 bool dc_enable_dmub_notifications(struct dc *dc) 5945 { 5946 return dc_is_dmub_outbox_supported(dc); 5947 } 5948 5949 /** 5950 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification 5951 * 5952 * @dc: [in] dc structure 5953 * 5954 * Enables DMUB unsolicited notifications to x86 via outbox. 5955 */ 5956 void dc_enable_dmub_outbox(struct dc *dc) 5957 { 5958 struct dc_context *dc_ctx = dc->ctx; 5959 5960 dmub_enable_outbox_notification(dc_ctx->dmub_srv); 5961 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__); 5962 } 5963 5964 /** 5965 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message 5966 * Sets port index appropriately for legacy DDC 5967 * @dc: dc structure 5968 * @link_index: link index 5969 * @payload: aux payload 5970 * 5971 * Returns: True if successful, False if failure 5972 */ 5973 bool dc_process_dmub_aux_transfer_async(struct dc *dc, 5974 uint32_t link_index, 5975 struct aux_payload *payload) 5976 { 5977 uint8_t action; 5978 union dmub_rb_cmd cmd = {0}; 5979 5980 ASSERT(payload->length <= 16); 5981 5982 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; 5983 cmd.dp_aux_access.header.payload_bytes = 0; 5984 /* For dpia, ddc_pin is set to NULL */ 5985 if (!dc->links[link_index]->ddc->ddc_pin) 5986 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA; 5987 else 5988 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; 5989 5990 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; 5991 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; 5992 cmd.dp_aux_access.aux_control.timeout = 0; 5993 cmd.dp_aux_access.aux_control.dpaux.address = payload->address; 5994 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; 5995 cmd.dp_aux_access.aux_control.dpaux.length = payload->length; 5996 5997 /* set aux action */ 5998 if (payload->i2c_over_aux) { 5999 if (payload->write) { 6000 if (payload->mot) 6001 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; 6002 else 6003 action = DP_AUX_REQ_ACTION_I2C_WRITE; 6004 } else { 6005 if (payload->mot) 6006 action = DP_AUX_REQ_ACTION_I2C_READ_MOT; 6007 else 6008 action = DP_AUX_REQ_ACTION_I2C_READ; 6009 } 6010 } else { 6011 if (payload->write) 6012 action = 
DP_AUX_REQ_ACTION_DPCD_WRITE; 6013 else 6014 action = DP_AUX_REQ_ACTION_DPCD_READ; 6015 } 6016 6017 cmd.dp_aux_access.aux_control.dpaux.action = action; 6018 6019 if (payload->length && payload->write) { 6020 memcpy(cmd.dp_aux_access.aux_control.dpaux.data, 6021 payload->data, 6022 payload->length 6023 ); 6024 } 6025 6026 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 6027 6028 return true; 6029 } 6030 6031 bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits, 6032 uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline) 6033 { 6034 bool status = false; 6035 struct dc *dc = link->ctx->dc; 6036 union dmub_rb_cmd cmd; 6037 uint8_t otg_inst = 0; 6038 unsigned int panel_inst = 0; 6039 struct pipe_ctx *pipe_ctx = NULL; 6040 struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx; 6041 int i = 0; 6042 6043 // get panel_inst 6044 if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) 6045 return status; 6046 6047 // get otg_inst 6048 for (i = 0; i < MAX_PIPES; i++) { 6049 if (res_ctx && 6050 res_ctx->pipe_ctx[i].stream && 6051 res_ctx->pipe_ctx[i].stream->link && 6052 res_ctx->pipe_ctx[i].stream->link == link && 6053 res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) { 6054 pipe_ctx = &res_ctx->pipe_ctx[i]; 6055 //TODO: refactor for multi edp support 6056 break; 6057 } 6058 } 6059 6060 if (pipe_ctx) 6061 otg_inst = pipe_ctx->stream_res.tg->inst; 6062 6063 // before enable smart power OLED, we need to call set pipe for DMUB to set ABM config 6064 if (enable) { 6065 if (dc->hwss.set_pipe && pipe_ctx) 6066 dc->hwss.set_pipe(pipe_ctx); 6067 } 6068 6069 // fill in cmd 6070 memset(&cmd, 0, sizeof(cmd)); 6071 6072 cmd.smart_power_oled_enable.header.type = DMUB_CMD__SMART_POWER_OLED; 6073 cmd.smart_power_oled_enable.header.sub_type = DMUB_CMD__SMART_POWER_OLED_ENABLE; 6074 cmd.smart_power_oled_enable.header.payload_bytes = 6075 sizeof(struct dmub_rb_cmd_smart_power_oled_enable_data) - sizeof(struct dmub_cmd_header); 6076 cmd.smart_power_oled_enable.header.ret_status = 1; 6077 cmd.smart_power_oled_enable.data.enable = enable; 6078 cmd.smart_power_oled_enable.data.panel_inst = panel_inst; 6079 cmd.smart_power_oled_enable.data.peak_nits = peak_nits; 6080 cmd.smart_power_oled_enable.data.otg_inst = otg_inst; 6081 cmd.smart_power_oled_enable.data.digfe_inst = link->link_enc->preferred_engine; 6082 cmd.smart_power_oled_enable.data.digbe_inst = link->link_enc->transmitter; 6083 6084 cmd.smart_power_oled_enable.data.debugcontrol = debug_control; 6085 cmd.smart_power_oled_enable.data.triggerline = triggerline; 6086 cmd.smart_power_oled_enable.data.fixed_max_cll = fixed_CLL; 6087 6088 // send cmd 6089 status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 6090 6091 return status; 6092 } 6093 6094 bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL) 6095 { 6096 struct dc *dc = link->ctx->dc; 6097 union dmub_rb_cmd cmd; 6098 bool status = false; 6099 unsigned int panel_inst = 0; 6100 6101 // get panel_inst 6102 if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) 6103 return status; 6104 6105 // fill in cmd 6106 memset(&cmd, 0, sizeof(cmd)); 6107 6108 cmd.smart_power_oled_getmaxcll.header.type = DMUB_CMD__SMART_POWER_OLED; 6109 cmd.smart_power_oled_getmaxcll.header.sub_type = DMUB_CMD__SMART_POWER_OLED_GETMAXCLL; 6110 cmd.smart_power_oled_getmaxcll.header.payload_bytes = sizeof(cmd.smart_power_oled_getmaxcll.data); 6111 
cmd.smart_power_oled_getmaxcll.header.ret_status = 1; 6112 6113 cmd.smart_power_oled_getmaxcll.data.input.panel_inst = panel_inst; 6114 6115 // send cmd and wait for reply 6116 status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); 6117 6118 if (status) 6119 *pCurrent_MaxCLL = cmd.smart_power_oled_getmaxcll.data.output.current_max_cll; 6120 else 6121 *pCurrent_MaxCLL = 0; 6122 6123 return status; 6124 } 6125 6126 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, 6127 uint8_t dpia_port_index) 6128 { 6129 uint8_t index, link_index = 0xFF; 6130 6131 for (index = 0; index < dc->link_count; index++) { 6132 /* ddc_hw_inst has dpia port index for dpia links 6133 * and ddc instance for legacy links 6134 */ 6135 if (!dc->links[index]->ddc->ddc_pin) { 6136 if (dc->links[index]->ddc_hw_inst == dpia_port_index) { 6137 link_index = index; 6138 break; 6139 } 6140 } 6141 } 6142 ASSERT(link_index != 0xFF); 6143 return link_index; 6144 } 6145 6146 /** 6147 * dc_process_dmub_set_config_async - Submits set_config command 6148 * 6149 * @dc: [in] dc structure 6150 * @link_index: [in] link_index: link index 6151 * @payload: [in] aux payload 6152 * @notify: [out] set_config immediate reply 6153 * 6154 * Submits set_config command to dmub via inbox message. 6155 * 6156 * Return: 6157 * True if successful, False if failure 6158 */ 6159 bool dc_process_dmub_set_config_async(struct dc *dc, 6160 uint32_t link_index, 6161 struct set_config_cmd_payload *payload, 6162 struct dmub_notification *notify) 6163 { 6164 union dmub_rb_cmd cmd = {0}; 6165 bool is_cmd_complete = true; 6166 6167 /* prepare SET_CONFIG command */ 6168 cmd.set_config_access.header.type = DMUB_CMD__DPIA; 6169 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; 6170 6171 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; 6172 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 6173 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; 6174 6175 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { 6176 /* command is not processed by dmub */ 6177 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; 6178 return is_cmd_complete; 6179 } 6180 6181 /* command processed by dmub, if ret_status is 1, it is completed instantly */ 6182 if (cmd.set_config_access.header.ret_status == 1) 6183 notify->sc_status = cmd.set_config_access.set_config_control.immed_status; 6184 else 6185 /* cmd pending, will receive notification via outbox */ 6186 is_cmd_complete = false; 6187 6188 return is_cmd_complete; 6189 } 6190 6191 /** 6192 * dc_process_dmub_set_mst_slots - Submits MST solt allocation 6193 * 6194 * @dc: [in] dc structure 6195 * @link_index: [in] link index 6196 * @mst_alloc_slots: [in] mst slots to be allotted 6197 * @mst_slots_in_use: [out] mst slots in use returned in failure case 6198 * 6199 * Submits mst slot allocation command to dmub via inbox message 6200 * 6201 * Return: 6202 * DC_OK if successful, DC_ERROR if failure 6203 */ 6204 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, 6205 uint32_t link_index, 6206 uint8_t mst_alloc_slots, 6207 uint8_t *mst_slots_in_use) 6208 { 6209 union dmub_rb_cmd cmd = {0}; 6210 6211 /* prepare MST_ALLOC_SLOTS command */ 6212 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; 6213 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; 6214 6215 cmd.set_mst_alloc_slots.mst_slots_control.instance = 
dc->links[link_index]->ddc_hw_inst; 6216 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; 6217 6218 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) 6219 /* command is not processed by dmub */ 6220 return DC_ERROR_UNEXPECTED; 6221 6222 /* command processed by dmub, if ret_status is 1 */ 6223 if (cmd.set_config_access.header.ret_status != 1) 6224 /* command processing error */ 6225 return DC_ERROR_UNEXPECTED; 6226 6227 /* command processed and we have a status of 2, mst not enabled in dpia */ 6228 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) 6229 return DC_FAIL_UNSUPPORTED_1; 6230 6231 /* previously configured mst alloc and used slots did not match */ 6232 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { 6233 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; 6234 return DC_NOT_SUPPORTED; 6235 } 6236 6237 return DC_OK; 6238 } 6239 6240 /** 6241 * dc_process_dmub_dpia_set_tps_notification - Submits tps notification 6242 * 6243 * @dc: [in] dc structure 6244 * @link_index: [in] link index 6245 * @tps: [in] requested tps 6246 * 6247 * Submits set_tps_notification command to dmub via inbox message 6248 */ 6249 void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps) 6250 { 6251 union dmub_rb_cmd cmd = {0}; 6252 6253 cmd.set_tps_notification.header.type = DMUB_CMD__DPIA; 6254 cmd.set_tps_notification.header.sub_type = DMUB_CMD__DPIA_SET_TPS_NOTIFICATION; 6255 cmd.set_tps_notification.tps_notification.instance = dc->links[link_index]->ddc_hw_inst; 6256 cmd.set_tps_notification.tps_notification.tps = tps; 6257 6258 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 6259 } 6260 6261 /** 6262 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable 6263 * 6264 * @dc: [in] dc structure 6265 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable 6266 * 6267 * Submits dpia hpd int enable command to dmub via inbox message 6268 */ 6269 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, 6270 uint32_t hpd_int_enable) 6271 { 6272 union dmub_rb_cmd cmd = {0}; 6273 6274 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; 6275 cmd.dpia_hpd_int_enable.enable = hpd_int_enable; 6276 6277 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 6278 6279 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); 6280 } 6281 6282 /** 6283 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging 6284 * 6285 * @dc: [in] dc structure 6286 * 6287 * 6288 */ 6289 void dc_print_dmub_diagnostic_data(const struct dc *dc) 6290 { 6291 dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv); 6292 } 6293 6294 /** 6295 * dc_disable_accelerated_mode - disable accelerated mode 6296 * @dc: dc structure 6297 */ 6298 void dc_disable_accelerated_mode(struct dc *dc) 6299 { 6300 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); 6301 } 6302 6303 6304 /** 6305 * dc_notify_vsync_int_state - notifies vsync enable/disable state 6306 * @dc: dc structure 6307 * @stream: stream where vsync int state changed 6308 * @enable: whether vsync is enabled or disabled 6309 * 6310 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM 6311 * interrupts after steady state is reached.
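 * A minimal usage sketch (hypothetical DM-side caller, not code from this file): call dc_notify_vsync_int_state(dc, stream, true) when the OS enables the vblank/vsync interrupt for the CRTC driving @stream, and dc_notify_vsync_int_state(dc, stream, false) when that interrupt is disabled, so the ABM pause state tracks vsync usage.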
6312 */ 6313 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) 6314 { 6315 int i; 6316 int edp_num; 6317 struct pipe_ctx *pipe = NULL; 6318 struct dc_link *link = stream->sink->link; 6319 struct dc_link *edp_links[MAX_NUM_EDP]; 6320 6321 6322 if (link->psr_settings.psr_feature_enabled) 6323 return; 6324 6325 if (link->replay_settings.replay_feature_enabled) 6326 return; 6327 6328 /* find primary pipe associated with stream */ 6329 for (i = 0; i < MAX_PIPES; i++) { 6330 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 6331 6332 if (pipe->stream == stream && pipe->stream_res.tg) 6333 break; 6334 } 6335 6336 if (i == MAX_PIPES) { 6337 ASSERT(0); 6338 return; 6339 } 6340 6341 dc_get_edp_links(dc, edp_links, &edp_num); 6342 6343 /* Determine panel inst */ 6344 for (i = 0; i < edp_num; i++) { 6345 if (edp_links[i] == link) 6346 break; 6347 } 6348 6349 if (i == edp_num) { 6350 return; 6351 } 6352 6353 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) 6354 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); 6355 } 6356 6357 /***************************************************************************** 6358 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause 6359 * ABM 6360 * @dc: dc structure 6361 * @stream: stream whose ABM hw state is being saved or restored 6362 * @pData: abm hw states 6363 * 6364 ****************************************************************************/ 6365 bool dc_abm_save_restore( 6366 struct dc *dc, 6367 struct dc_stream_state *stream, 6368 struct abm_save_restore *pData) 6369 { 6370 int i; 6371 int edp_num; 6372 struct pipe_ctx *pipe = NULL; 6373 struct dc_link *link = stream->sink->link; 6374 struct dc_link *edp_links[MAX_NUM_EDP]; 6375 6376 if (link->replay_settings.replay_feature_enabled) 6377 return false; 6378 6379 /* find primary pipe associated with stream */ 6380 for (i = 0; i < MAX_PIPES; i++) { 6381 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 6382 6383 if (pipe->stream == stream && pipe->stream_res.tg) 6384 break; 6385 } 6386 6387 if (i == MAX_PIPES) { 6388 ASSERT(0); 6389 return false; 6390 } 6391 6392 dc_get_edp_links(dc, edp_links, &edp_num); 6393 6394 /* Determine panel inst */ 6395 for (i = 0; i < edp_num; i++) 6396 if (edp_links[i] == link) 6397 break; 6398 6399 if (i == edp_num) 6400 return false; 6401 6402 if (pipe->stream_res.abm && 6403 pipe->stream_res.abm->funcs->save_restore) 6404 return pipe->stream_res.abm->funcs->save_restore( 6405 pipe->stream_res.abm, 6406 i, 6407 pData); 6408 return false; 6409 } 6410 6411 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) 6412 { 6413 unsigned int i; 6414 unsigned int max_cursor_size = dc->caps.max_cursor_size; 6415 unsigned int stream_cursor_size; 6416 6417 if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) { 6418 for (i = 0; i < dc->current_state->stream_count; i++) { 6419 stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, 6420 dc->current_state, 6421 dc->current_state->streams[i]); 6422 6423 if (stream_cursor_size < max_cursor_size) { 6424 max_cursor_size = stream_cursor_size; 6425 } 6426 } 6427 } 6428 6429 properties->cursor_size_limit = max_cursor_size; 6430 } 6431 6432 /** 6433 * dc_set_edp_power() - DM controls eDP power to be ON/OFF 6434 * 6435 * Called when DM wants to power on/off eDP. 6436 * Only works on links that have the skip_implict_edp_power_control flag set.
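 * A minimal usage sketch (hypothetical DM-side call): dc_set_edp_power(dc, edp_link, true) before the eDP panel needs to light up, and dc_set_edp_power(dc, edp_link, false) once the panel is no longer driven; on links without skip_implict_edp_power_control set, the call returns without doing anything.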
6437 * 6438 * @dc: Current DC state 6439 * @edp_link: a link with eDP connector signal type 6440 * @powerOn: power on/off eDP 6441 * 6442 * Return: void 6443 */ 6444 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, 6445 bool powerOn) 6446 { 6447 if (edp_link->connector_signal != SIGNAL_TYPE_EDP) 6448 return; 6449 6450 if (edp_link->skip_implict_edp_power_control == false) 6451 return; 6452 6453 edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); 6454 } 6455 6456 /** 6457 * dc_get_power_profile_for_dc_state() - extracts power profile from dc state 6458 * 6459 * Called when DM wants to make power policy decisions based on dc_state 6460 * 6461 * @context: Pointer to the dc_state from which the power profile is extracted. 6462 * 6463 * Return: The power profile structure containing the power level information. 6464 */ 6465 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context) 6466 { 6467 struct dc_power_profile profile = { 0 }; 6468 6469 profile.power_level = !context->bw_ctx.bw.dcn.clk.p_state_change_support; 6470 if (!context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc) 6471 return profile; 6472 struct dc *dc = context->clk_mgr->ctx->dc; 6473 6474 if (dc->res_pool->funcs->get_power_profile) 6475 profile.power_level = dc->res_pool->funcs->get_power_profile(context); 6476 return profile; 6477 } 6478 6479 /** 6480 * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state 6481 * 6482 * This function is called to log the detile buffer size from the dc_state. 6483 * 6484 * @context: a pointer to the dc_state from which the detile buffer size is extracted. 6485 * 6486 * Return: the size of the detile buffer, or 0 if not available. 6487 */ 6488 unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) 6489 { 6490 struct dc *dc = context->clk_mgr->ctx->dc; 6491 6492 if (dc->res_pool->funcs->get_det_buffer_size) 6493 return dc->res_pool->funcs->get_det_buffer_size(context); 6494 else 6495 return 0; 6496 } 6497 6498 /** 6499 * dc_get_host_router_index: Get index of host router from a dpia link 6500 * 6501 * This function returns the host router index of the target link if the target link is a dpia link. 6502 * 6503 * @link: Pointer to the target link (input) 6504 * @host_router_index: Pointer to store the host router index of the target link (output). 6505 * 6506 * Return: true if the host router index is found and valid.
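 * A worked example with hypothetical values (not taken from any specific ASIC): with dc->lowest_dpia_link_index = 2 and dc->caps.num_of_dpias_per_host_router = 2, a DPIA link with link_index = 5 yields *host_router_index = (5 - 2) / 2 = 1, which is reported as valid only when dc->caps.num_of_host_routers > 1.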
6507 * 6508 */ 6509 bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index) 6510 { 6511 struct dc *dc; 6512 6513 if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) 6514 return false; 6515 6516 dc = link->ctx->dc; 6517 6518 if (link->link_index < dc->lowest_dpia_link_index) 6519 return false; 6520 6521 *host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router; 6522 if (*host_router_index < dc->caps.num_of_host_routers) 6523 return true; 6524 else 6525 return false; 6526 } 6527 6528 bool dc_is_cursor_limit_pending(struct dc *dc) 6529 { 6530 uint32_t i; 6531 6532 for (i = 0; i < dc->current_state->stream_count; i++) { 6533 if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i])) 6534 return true; 6535 } 6536 6537 return false; 6538 } 6539 6540 bool dc_can_clear_cursor_limit(const struct dc *dc) 6541 { 6542 uint32_t i; 6543 6544 for (i = 0; i < dc->current_state->stream_count; i++) { 6545 if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state)) 6546 return true; 6547 } 6548 6549 return false; 6550 } 6551 6552 void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, 6553 struct dc_underflow_debug_data *out_data) 6554 { 6555 struct timing_generator *tg = NULL; 6556 6557 for (int i = 0; i < MAX_PIPES; i++) { 6558 if (dc->res_pool->timing_generators[i] && 6559 dc->res_pool->timing_generators[i]->inst == primary_otg_inst) { 6560 tg = dc->res_pool->timing_generators[i]; 6561 break; 6562 } 6563 } 6564 6565 dc_exit_ips_for_hw_access(dc); 6566 if (dc->hwss.get_underflow_debug_data) 6567 dc->hwss.get_underflow_debug_data(dc, tg, out_data); 6568 } 6569 6570 void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, 6571 struct power_features *out_data) 6572 { 6573 out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support; 6574 out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching; 6575 } 6576 6577 bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state) 6578 { 6579 struct dc_state *context; 6580 struct resource_context *res_ctx; 6581 int i; 6582 6583 if (!dc || !dc->current_state || !state) { 6584 if (state) 6585 state->state_valid = false; 6586 return false; 6587 } 6588 6589 /* Initialize the state structure */ 6590 memset(state, 0, sizeof(struct dc_register_software_state)); 6591 6592 context = dc->current_state; 6593 res_ctx = &context->res_ctx; 6594 6595 /* Count active pipes and streams */ 6596 state->active_pipe_count = 0; 6597 state->active_stream_count = context->stream_count; 6598 6599 for (i = 0; i < dc->res_pool->pipe_count; i++) { 6600 if (res_ctx->pipe_ctx[i].stream) 6601 state->active_pipe_count++; 6602 } 6603 6604 /* Capture HUBP programming state for each pipe */ 6605 for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { 6606 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 6607 6608 state->hubp[i].valid_stream = false; 6609 if (!pipe_ctx->stream) 6610 continue; 6611 6612 state->hubp[i].valid_stream = true; 6613 6614 /* HUBP register programming variables */ 6615 if (pipe_ctx->stream_res.tg) 6616 state->hubp[i].vtg_sel = pipe_ctx->stream_res.tg->inst; 6617 6618 state->hubp[i].hubp_clock_enable = (pipe_ctx->plane_res.hubp != NULL) ? 
1 : 0; 6619 6620 state->hubp[i].valid_plane_state = false; 6621 if (pipe_ctx->plane_state) { 6622 state->hubp[i].valid_plane_state = true; 6623 state->hubp[i].surface_pixel_format = pipe_ctx->plane_state->format; 6624 state->hubp[i].rotation_angle = pipe_ctx->plane_state->rotation; 6625 state->hubp[i].h_mirror_en = pipe_ctx->plane_state->horizontal_mirror ? 1 : 0; 6626 6627 /* Surface size */ 6628 if (pipe_ctx->plane_state->plane_size.surface_size.width > 0) { 6629 state->hubp[i].surface_size_width = pipe_ctx->plane_state->plane_size.surface_size.width; 6630 state->hubp[i].surface_size_height = pipe_ctx->plane_state->plane_size.surface_size.height; 6631 } 6632 6633 /* Viewport dimensions from scaler data */ 6634 if (pipe_ctx->plane_state->src_rect.width > 0) { 6635 state->hubp[i].pri_viewport_width = pipe_ctx->plane_state->src_rect.width; 6636 state->hubp[i].pri_viewport_height = pipe_ctx->plane_state->src_rect.height; 6637 state->hubp[i].pri_viewport_x_start = pipe_ctx->plane_state->src_rect.x; 6638 state->hubp[i].pri_viewport_y_start = pipe_ctx->plane_state->src_rect.y; 6639 } 6640 6641 /* DCC settings */ 6642 state->hubp[i].surface_dcc_en = (pipe_ctx->plane_state->dcc.enable) ? 1 : 0; 6643 state->hubp[i].surface_dcc_ind_64b_blk = pipe_ctx->plane_state->dcc.independent_64b_blks; 6644 state->hubp[i].surface_dcc_ind_128b_blk = pipe_ctx->plane_state->dcc.dcc_ind_blk; 6645 6646 /* Surface pitch */ 6647 state->hubp[i].surface_pitch = pipe_ctx->plane_state->plane_size.surface_pitch; 6648 state->hubp[i].meta_pitch = pipe_ctx->plane_state->dcc.meta_pitch; 6649 state->hubp[i].chroma_pitch = pipe_ctx->plane_state->plane_size.chroma_pitch; 6650 state->hubp[i].meta_pitch_c = pipe_ctx->plane_state->dcc.meta_pitch_c; 6651 6652 /* Surface addresses - primary */ 6653 state->hubp[i].primary_surface_address_low = pipe_ctx->plane_state->address.grph.addr.low_part; 6654 state->hubp[i].primary_surface_address_high = pipe_ctx->plane_state->address.grph.addr.high_part; 6655 state->hubp[i].primary_meta_surface_address_low = pipe_ctx->plane_state->address.grph.meta_addr.low_part; 6656 state->hubp[i].primary_meta_surface_address_high = pipe_ctx->plane_state->address.grph.meta_addr.high_part; 6657 6658 /* TMZ settings */ 6659 state->hubp[i].primary_surface_tmz = pipe_ctx->plane_state->address.tmz_surface; 6660 state->hubp[i].primary_meta_surface_tmz = pipe_ctx->plane_state->address.tmz_surface; 6661 6662 /* Tiling configuration */ 6663 state->hubp[i].min_dc_gfx_version9 = false; 6664 if (pipe_ctx->plane_state->tiling_info.gfxversion >= DcGfxVersion9) { 6665 state->hubp[i].min_dc_gfx_version9 = true; 6666 state->hubp[i].sw_mode = pipe_ctx->plane_state->tiling_info.gfx9.swizzle; 6667 state->hubp[i].num_pipes = pipe_ctx->plane_state->tiling_info.gfx9.num_pipes; 6668 state->hubp[i].num_banks = pipe_ctx->plane_state->tiling_info.gfx9.num_banks; 6669 state->hubp[i].pipe_interleave = pipe_ctx->plane_state->tiling_info.gfx9.pipe_interleave; 6670 state->hubp[i].num_shader_engines = pipe_ctx->plane_state->tiling_info.gfx9.num_shader_engines; 6671 state->hubp[i].num_rb_per_se = pipe_ctx->plane_state->tiling_info.gfx9.num_rb_per_se; 6672 state->hubp[i].num_pkrs = pipe_ctx->plane_state->tiling_info.gfx9.num_pkrs; 6673 } 6674 } 6675 6676 /* DML Request Size Configuration */ 6677 if (pipe_ctx->rq_regs.rq_regs_l.chunk_size > 0) { 6678 state->hubp[i].rq_chunk_size = pipe_ctx->rq_regs.rq_regs_l.chunk_size; 6679 state->hubp[i].rq_min_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_chunk_size; 6680 state->hubp[i].rq_meta_chunk_size = 
pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size; 6681 state->hubp[i].rq_min_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size; 6682 state->hubp[i].rq_dpte_group_size = pipe_ctx->rq_regs.rq_regs_l.dpte_group_size; 6683 state->hubp[i].rq_mpte_group_size = pipe_ctx->rq_regs.rq_regs_l.mpte_group_size; 6684 state->hubp[i].rq_swath_height_l = pipe_ctx->rq_regs.rq_regs_l.swath_height; 6685 state->hubp[i].rq_pte_row_height_l = pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear; 6686 } 6687 6688 /* Chroma request size configuration */ 6689 if (pipe_ctx->rq_regs.rq_regs_c.chunk_size > 0) { 6690 state->hubp[i].rq_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.chunk_size; 6691 state->hubp[i].rq_min_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_chunk_size; 6692 state->hubp[i].rq_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.meta_chunk_size; 6693 state->hubp[i].rq_min_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_meta_chunk_size; 6694 state->hubp[i].rq_dpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.dpte_group_size; 6695 state->hubp[i].rq_mpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.mpte_group_size; 6696 state->hubp[i].rq_swath_height_c = pipe_ctx->rq_regs.rq_regs_c.swath_height; 6697 state->hubp[i].rq_pte_row_height_c = pipe_ctx->rq_regs.rq_regs_c.pte_row_height_linear; 6698 } 6699 6700 /* DML expansion modes */ 6701 state->hubp[i].drq_expansion_mode = pipe_ctx->rq_regs.drq_expansion_mode; 6702 state->hubp[i].prq_expansion_mode = pipe_ctx->rq_regs.prq_expansion_mode; 6703 state->hubp[i].mrq_expansion_mode = pipe_ctx->rq_regs.mrq_expansion_mode; 6704 state->hubp[i].crq_expansion_mode = pipe_ctx->rq_regs.crq_expansion_mode; 6705 6706 /* DML DLG parameters - nominal */ 6707 state->hubp[i].dst_y_per_vm_vblank = pipe_ctx->dlg_regs.dst_y_per_vm_vblank; 6708 state->hubp[i].dst_y_per_row_vblank = pipe_ctx->dlg_regs.dst_y_per_row_vblank; 6709 state->hubp[i].dst_y_per_vm_flip = pipe_ctx->dlg_regs.dst_y_per_vm_flip; 6710 state->hubp[i].dst_y_per_row_flip = pipe_ctx->dlg_regs.dst_y_per_row_flip; 6711 6712 /* DML prefetch settings */ 6713 state->hubp[i].dst_y_prefetch = pipe_ctx->dlg_regs.dst_y_prefetch; 6714 state->hubp[i].vratio_prefetch = pipe_ctx->dlg_regs.vratio_prefetch; 6715 state->hubp[i].vratio_prefetch_c = pipe_ctx->dlg_regs.vratio_prefetch_c; 6716 6717 /* TTU parameters */ 6718 state->hubp[i].qos_level_low_wm = pipe_ctx->ttu_regs.qos_level_low_wm; 6719 state->hubp[i].qos_level_high_wm = pipe_ctx->ttu_regs.qos_level_high_wm; 6720 state->hubp[i].qos_level_flip = pipe_ctx->ttu_regs.qos_level_flip; 6721 state->hubp[i].min_ttu_vblank = pipe_ctx->ttu_regs.min_ttu_vblank; 6722 } 6723 6724 /* Capture HUBBUB programming state */ 6725 if (dc->res_pool->hubbub) { 6726 /* Individual DET buffer sizes - software state variables that program DET registers */ 6727 for (i = 0; i < 4 && i < dc->res_pool->pipe_count; i++) { 6728 uint32_t det_size = res_ctx->pipe_ctx[i].det_buffer_size_kb; 6729 switch (i) { 6730 case 0: 6731 state->hubbub.det0_size = det_size; 6732 break; 6733 case 1: 6734 state->hubbub.det1_size = det_size; 6735 break; 6736 case 2: 6737 state->hubbub.det2_size = det_size; 6738 break; 6739 case 3: 6740 state->hubbub.det3_size = det_size; 6741 break; 6742 } 6743 } 6744 6745 /* Compression buffer configuration - software state that programs COMPBUF_SIZE register */ 6746 // TODO: Handle logic for legacy DCN pre-DCN401 6747 state->hubbub.compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size; 6748 } 6749 6750 /* Capture DPP programming state for each pipe */ 6751 for (i = 0; i < 
MAX_PIPES && i < dc->res_pool->pipe_count; i++) { 6752 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 6753 6754 if (!pipe_ctx->stream) 6755 continue; 6756 6757 state->dpp[i].dpp_clock_enable = (pipe_ctx->plane_res.dpp != NULL) ? 1 : 0; 6758 6759 if (pipe_ctx->plane_state && pipe_ctx->plane_res.scl_data.recout.width > 0) { 6760 /* Access dscl_prog_data directly - this contains the actual software state used for register programming */ 6761 struct dscl_prog_data *dscl_data = &pipe_ctx->plane_res.scl_data.dscl_prog_data; 6762 6763 /* Recout (Rectangle of Interest) configuration - software state that programs RECOUT registers */ 6764 state->dpp[i].recout_start_x = dscl_data->recout.x; 6765 state->dpp[i].recout_start_y = dscl_data->recout.y; 6766 state->dpp[i].recout_width = dscl_data->recout.width; 6767 state->dpp[i].recout_height = dscl_data->recout.height; 6768 6769 /* MPC (Multiple Pipe/Plane Combiner) size - software state that programs MPC_SIZE registers */ 6770 state->dpp[i].mpc_width = dscl_data->mpc_size.width; 6771 state->dpp[i].mpc_height = dscl_data->mpc_size.height; 6772 6773 /* DSCL mode - software state that programs SCL_MODE registers */ 6774 state->dpp[i].dscl_mode = dscl_data->dscl_mode; 6775 6776 /* Scaler ratios - software state that programs scale ratio registers (use actual programmed ratios) */ 6777 state->dpp[i].horz_ratio_int = dscl_data->ratios.h_scale_ratio >> 19; // Extract integer part from programmed ratio 6778 state->dpp[i].vert_ratio_int = dscl_data->ratios.v_scale_ratio >> 19; // Extract integer part from programmed ratio 6779 6780 /* Basic scaler taps - software state that programs tap control registers (use actual programmed taps) */ 6781 state->dpp[i].h_taps = dscl_data->taps.h_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back 6782 state->dpp[i].v_taps = dscl_data->taps.v_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back 6783 } 6784 } 6785 6786 /* Capture essential clock state for underflow analysis */ 6787 if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz > 0) { 6788 /* Core display clocks affecting bandwidth and timing */ 6789 state->dccg.dispclk_khz = dc->clk_mgr->clks.dispclk_khz; 6790 6791 /* Per-pipe clock configuration - only capture what's essential */ 6792 for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { 6793 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 6794 if (pipe_ctx->stream) { 6795 /* Essential clocks that directly affect underflow risk */ 6796 state->dccg.dppclk_khz[i] = dc->clk_mgr->clks.dppclk_khz; 6797 state->dccg.pixclk_khz[i] = pipe_ctx->stream->timing.pix_clk_100hz / 10; 6798 state->dccg.dppclk_enable[i] = 1; 6799 6800 /* DP stream clock only for DP signals */ 6801 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || 6802 pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 6803 state->dccg.dpstreamclk_enable[i] = 1; 6804 } else { 6805 state->dccg.dpstreamclk_enable[i] = 0; 6806 } 6807 } else { 6808 /* Inactive pipe - no clocks */ 6809 state->dccg.dppclk_khz[i] = 0; 6810 state->dccg.pixclk_khz[i] = 0; 6811 state->dccg.dppclk_enable[i] = 0; 6812 if (i < 4) { 6813 state->dccg.dpstreamclk_enable[i] = 0; 6814 } 6815 } 6816 } 6817 6818 /* DSC clock state - only when actually using DSC */ 6819 for (i = 0; i < MAX_PIPES; i++) { 6820 struct pipe_ctx *pipe_ctx = (i < dc->res_pool->pipe_count) ? 
&res_ctx->pipe_ctx[i] : NULL; 6821 if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) { 6822 state->dccg.dscclk_khz[i] = 400000; /* Typical DSC clock frequency */ 6823 } else { 6824 state->dccg.dscclk_khz[i] = 0; 6825 } 6826 } 6827 6828 /* SYMCLK32 LE Control - only the essential HPO state for underflow analysis */ 6829 for (i = 0; i < 2; i++) { 6830 state->dccg.symclk32_le_enable[i] = 0; /* Default: disabled */ 6831 } 6832 6833 } 6834 6835 /* Capture essential DSC configuration for underflow analysis */ 6836 for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { 6837 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 6838 6839 if (pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) { 6840 /* DSC is enabled - capture essential configuration */ 6841 state->dsc[i].dsc_clock_enable = 1; 6842 6843 /* DSC configuration affecting bandwidth and timing */ 6844 struct dc_dsc_config *dsc_cfg = &pipe_ctx->stream->timing.dsc_cfg; 6845 state->dsc[i].dsc_num_slices_h = dsc_cfg->num_slices_h; 6846 state->dsc[i].dsc_num_slices_v = dsc_cfg->num_slices_v; 6847 state->dsc[i].dsc_bits_per_pixel = dsc_cfg->bits_per_pixel; 6848 6849 /* OPP pipe source for DSC forwarding */ 6850 if (pipe_ctx->stream_res.opp) { 6851 state->dsc[i].dscrm_dsc_forward_enable = 1; 6852 state->dsc[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst; 6853 } else { 6854 state->dsc[i].dscrm_dsc_forward_enable = 0; 6855 state->dsc[i].dscrm_dsc_opp_pipe_source = 0; 6856 } 6857 } else { 6858 /* DSC not enabled - clear all fields */ 6859 memset(&state->dsc[i], 0, sizeof(state->dsc[i])); 6860 } 6861 } 6862 6863 /* Capture MPC programming state - comprehensive register field coverage */ 6864 for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { 6865 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 6866 6867 if (pipe_ctx->plane_state && pipe_ctx->stream) { 6868 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 6869 6870 /* MPCC blending tree and mode control - capture actual blend configuration */ 6871 state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0; 6872 state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0; 6873 state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0; 6874 state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */ 6875 state->mpc.mpcc_global_alpha[i] = plane_state->global_alpha_value; 6876 state->mpc.mpcc_global_gain[i] = plane_state->global_alpha ? 
255 : 0; 6877 state->mpc.mpcc_bg_bpc[i] = 8; /* Standard 8-bit background */ 6878 state->mpc.mpcc_bot_gain_mode[i] = 0; /* Standard gain mode */ 6879 6880 /* MPCC blending tree connections - capture tree topology */ 6881 if (pipe_ctx->bottom_pipe) { 6882 state->mpc.mpcc_bot_sel[i] = pipe_ctx->bottom_pipe->pipe_idx; 6883 } else { 6884 state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */ 6885 } 6886 state->mpc.mpcc_top_sel[i] = pipe_ctx->pipe_idx; /* This pipe's DPP ID */ 6887 6888 /* MPCC output gamma control - capture gamma programming */ 6889 if (plane_state->gamma_correction.type != GAMMA_CS_TFM_1D && plane_state->gamma_correction.num_entries > 0) { 6890 state->mpc.mpcc_ogam_mode[i] = 1; /* Gamma enabled */ 6891 state->mpc.mpcc_ogam_select[i] = 0; /* Bank A selection */ 6892 state->mpc.mpcc_ogam_pwl_disable[i] = 0; /* PWL enabled */ 6893 } else { 6894 state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass mode */ 6895 state->mpc.mpcc_ogam_select[i] = 0; 6896 state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */ 6897 } 6898 6899 /* MPCC pipe assignment and operational status */ 6900 if (pipe_ctx->stream_res.opp) { 6901 state->mpc.mpcc_opp_id[i] = pipe_ctx->stream_res.opp->inst; 6902 } else { 6903 state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */ 6904 } 6905 6906 /* MPCC status indicators - active pipe state */ 6907 state->mpc.mpcc_idle[i] = 0; /* Active pipe - not idle */ 6908 state->mpc.mpcc_busy[i] = 1; /* Active pipe - busy processing */ 6909 6910 } else { 6911 /* Pipe not active - set disabled/idle state for all fields */ 6912 state->mpc.mpcc_mode[i] = 0; 6913 state->mpc.mpcc_alpha_blend_mode[i] = 0; 6914 state->mpc.mpcc_alpha_multiplied_mode[i] = 0; 6915 state->mpc.mpcc_blnd_active_overlap_only[i] = 0; 6916 state->mpc.mpcc_global_alpha[i] = 0; 6917 state->mpc.mpcc_global_gain[i] = 0; 6918 state->mpc.mpcc_bg_bpc[i] = 0; 6919 state->mpc.mpcc_bot_gain_mode[i] = 0; 6920 state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */ 6921 state->mpc.mpcc_top_sel[i] = 0xF; /* No top connection */ 6922 state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass */ 6923 state->mpc.mpcc_ogam_select[i] = 0; 6924 state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */ 6925 state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */ 6926 state->mpc.mpcc_idle[i] = 1; /* Idle */ 6927 state->mpc.mpcc_busy[i] = 0; /* Not busy */ 6928 } 6929 } 6930 6931 /* Capture OPP programming state for each pipe - comprehensive register field coverage */ 6932 for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { 6933 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 6934 6935 if (!pipe_ctx->stream) 6936 continue; 6937 6938 if (pipe_ctx->stream_res.opp) { 6939 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 6940 6941 /* OPP Pipe Control */ 6942 state->opp[i].opp_pipe_clock_enable = 1; /* Active pipe has clock enabled */ 6943 6944 /* Display Pattern Generator (DPG) Control - 19 fields */ 6945 if (pipe_ctx->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 6946 state->opp[i].dpg_enable = 1; 6947 } else { 6948 /* Video mode - DPG disabled */ 6949 state->opp[i].dpg_enable = 0; 6950 } 6951 6952 /* Format Control (FMT) - 18 fields */ 6953 state->opp[i].fmt_pixel_encoding = timing->pixel_encoding; 6954 6955 /* Chroma subsampling mode based on pixel encoding */ 6956 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { 6957 state->opp[i].fmt_subsampling_mode = 1; /* 4:2:0 subsampling */ 6958 } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { 6959 state->opp[i].fmt_subsampling_mode 
= 2; /* 4:2:2 subsampling */ 6960 } else { 6961 state->opp[i].fmt_subsampling_mode = 0; /* No subsampling (4:4:4) */ 6962 } 6963 6964 state->opp[i].fmt_cbcr_bit_reduction_bypass = (timing->pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; 6965 state->opp[i].fmt_stereosync_override = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0; 6966 6967 /* Dithering control based on bit depth */ 6968 if (timing->display_color_depth < COLOR_DEPTH_121212) { 6969 state->opp[i].fmt_spatial_dither_frame_counter_max = 15; /* Typical frame counter max */ 6970 state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; /* No bit swapping */ 6971 state->opp[i].fmt_spatial_dither_enable = 1; 6972 state->opp[i].fmt_spatial_dither_mode = 0; /* Spatial dithering mode */ 6973 state->opp[i].fmt_spatial_dither_depth = timing->display_color_depth; 6974 state->opp[i].fmt_temporal_dither_enable = 0; /* Spatial dithering preferred */ 6975 } else { 6976 state->opp[i].fmt_spatial_dither_frame_counter_max = 0; 6977 state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; 6978 state->opp[i].fmt_spatial_dither_enable = 0; 6979 state->opp[i].fmt_spatial_dither_mode = 0; 6980 state->opp[i].fmt_spatial_dither_depth = 0; 6981 state->opp[i].fmt_temporal_dither_enable = 0; 6982 } 6983 6984 /* Truncation control for bit depth reduction */ 6985 if (timing->display_color_depth < COLOR_DEPTH_121212) { 6986 state->opp[i].fmt_truncate_enable = 1; 6987 state->opp[i].fmt_truncate_depth = timing->display_color_depth; 6988 state->opp[i].fmt_truncate_mode = 0; /* Round mode */ 6989 } else { 6990 state->opp[i].fmt_truncate_enable = 0; 6991 state->opp[i].fmt_truncate_depth = 0; 6992 state->opp[i].fmt_truncate_mode = 0; 6993 } 6994 6995 /* Data clamping control */ 6996 state->opp[i].fmt_clamp_data_enable = 1; /* Clamping typically enabled */ 6997 state->opp[i].fmt_clamp_color_format = timing->pixel_encoding; 6998 6999 /* Dynamic expansion for limited range content */ 7000 if (timing->pixel_encoding != PIXEL_ENCODING_RGB) { 7001 state->opp[i].fmt_dynamic_exp_enable = 1; /* YCbCr typically needs expansion */ 7002 state->opp[i].fmt_dynamic_exp_mode = 0; /* Standard expansion */ 7003 } else { 7004 state->opp[i].fmt_dynamic_exp_enable = 0; /* RGB typically full range */ 7005 state->opp[i].fmt_dynamic_exp_mode = 0; 7006 } 7007 7008 /* Legacy field for compatibility */ 7009 state->opp[i].fmt_bit_depth_control = timing->display_color_depth; 7010 7011 /* Output Buffer (OPPBUF) Control - 6 fields */ 7012 state->opp[i].oppbuf_active_width = timing->h_addressable; 7013 state->opp[i].oppbuf_pixel_repetition = 0; /* No pixel repetition by default */ 7014 7015 /* Multi-Stream Output (MSO) / ODM segmentation */ 7016 if (pipe_ctx->next_odm_pipe) { 7017 state->opp[i].oppbuf_display_segmentation = 1; /* Segmented display */ 7018 state->opp[i].oppbuf_overlap_pixel_num = 0; /* ODM overlap pixels */ 7019 } else { 7020 state->opp[i].oppbuf_display_segmentation = 0; /* Single segment */ 7021 state->opp[i].oppbuf_overlap_pixel_num = 0; 7022 } 7023 7024 /* 3D/Stereo control */ 7025 if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) { 7026 state->opp[i].oppbuf_3d_vact_space1_size = 30; /* Typical stereo blanking */ 7027 state->opp[i].oppbuf_3d_vact_space2_size = 30; 7028 } else { 7029 state->opp[i].oppbuf_3d_vact_space1_size = 0; 7030 state->opp[i].oppbuf_3d_vact_space2_size = 0; 7031 } 7032 7033 /* DSC Forward Config - 3 fields */ 7034 if (timing->dsc_cfg.num_slices_h > 0) { 7035 state->opp[i].dscrm_dsc_forward_enable = 1; 7036 
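/* DSCRM forwarding capture: when DSC slices are configured, the snapshot
 * records the OPP instance the DSC stream is attached to, so the
 * DSC-to-OPP routing can be reconstructed from the captured state.
 */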
state->opp[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst; 7037 state->opp[i].dscrm_dsc_forward_enable_status = 1; /* Status follows enable */ 7038 } else { 7039 state->opp[i].dscrm_dsc_forward_enable = 0; 7040 state->opp[i].dscrm_dsc_opp_pipe_source = 0; 7041 state->opp[i].dscrm_dsc_forward_enable_status = 0; 7042 } 7043 } else { 7044 /* No OPP resource - set all fields to disabled state */ 7045 memset(&state->opp[i], 0, sizeof(state->opp[i])); 7046 } 7047 } 7048 7049 /* Capture OPTC programming state for each pipe - comprehensive register field coverage */ 7050 for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) { 7051 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 7052 7053 if (!pipe_ctx->stream) 7054 continue; 7055 7056 if (pipe_ctx->stream_res.tg) { 7057 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 7058 7059 state->optc[i].otg_master_inst = pipe_ctx->stream_res.tg->inst; 7060 7061 /* OTG_CONTROL register - 5 fields */ 7062 state->optc[i].otg_master_enable = 1; /* Active stream */ 7063 state->optc[i].otg_disable_point_cntl = 0; /* Normal operation */ 7064 state->optc[i].otg_start_point_cntl = 0; /* Normal start */ 7065 state->optc[i].otg_field_number_cntl = (timing->flags.INTERLACE) ? 1 : 0; 7066 state->optc[i].otg_out_mux = 0; /* Direct output */ 7067 7068 /* OTG Horizontal Timing - 7 fields */ 7069 state->optc[i].otg_h_total = timing->h_total; 7070 state->optc[i].otg_h_blank_start = timing->h_addressable; 7071 state->optc[i].otg_h_blank_end = timing->h_total - timing->h_front_porch; 7072 state->optc[i].otg_h_sync_start = timing->h_addressable + timing->h_front_porch; 7073 state->optc[i].otg_h_sync_end = timing->h_addressable + timing->h_front_porch + timing->h_sync_width; 7074 state->optc[i].otg_h_sync_polarity = timing->flags.HSYNC_POSITIVE_POLARITY ? 0 : 1; 7075 state->optc[i].otg_h_timing_div_mode = (pipe_ctx->next_odm_pipe) ? 1 : 0; /* ODM divide mode */ 7076 7077 /* OTG Vertical Timing - 7 fields */ 7078 state->optc[i].otg_v_total = timing->v_total; 7079 state->optc[i].otg_v_blank_start = timing->v_addressable; 7080 state->optc[i].otg_v_blank_end = timing->v_total - timing->v_front_porch; 7081 state->optc[i].otg_v_sync_start = timing->v_addressable + timing->v_front_porch; 7082 state->optc[i].otg_v_sync_end = timing->v_addressable + timing->v_front_porch + timing->v_sync_width; 7083 state->optc[i].otg_v_sync_polarity = timing->flags.VSYNC_POSITIVE_POLARITY ? 0 : 1; 7084 state->optc[i].otg_v_sync_mode = 0; /* Normal sync mode */ 7085 7086 /* Initialize remaining core fields with appropriate defaults */ 7087 // TODO: Update logic for accurate vtotal min/max 7088 state->optc[i].otg_v_total_max = timing->v_total + 100; /* Typical DRR range */ 7089 state->optc[i].otg_v_total_min = timing->v_total - 50; 7090 state->optc[i].otg_v_total_mid = timing->v_total; 7091 7092 /* ODM configuration */ 7093 // TODO: Update logic to have complete ODM mappings (e.g. 3:1 and 4:1) stored in single pipe 7094 if (pipe_ctx->next_odm_pipe) { 7095 state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0; 7096 state->optc[i].optc_seg1_src_sel = pipe_ctx->next_odm_pipe->stream_res.opp ? pipe_ctx->next_odm_pipe->stream_res.opp->inst : 0; 7097 state->optc[i].optc_num_of_input_segment = 1; /* 2 segments - 1 */ 7098 } else { 7099 state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? 
pipe_ctx->stream_res.opp->inst : 0; 7100 state->optc[i].optc_seg1_src_sel = 0; 7101 state->optc[i].optc_num_of_input_segment = 0; /* Single segment */ 7102 } 7103 7104 /* DSC configuration */ 7105 if (timing->dsc_cfg.num_slices_h > 0) { 7106 state->optc[i].optc_dsc_mode = 1; /* DSC enabled */ 7107 state->optc[i].optc_dsc_bytes_per_pixel = timing->dsc_cfg.bits_per_pixel / 16; /* Convert to bytes */ 7108 state->optc[i].optc_dsc_slice_width = timing->h_addressable / timing->dsc_cfg.num_slices_h; 7109 } else { 7110 state->optc[i].optc_dsc_mode = 0; 7111 state->optc[i].optc_dsc_bytes_per_pixel = 0; 7112 state->optc[i].optc_dsc_slice_width = 0; 7113 } 7114 7115 /* Essential control fields */ 7116 state->optc[i].otg_stereo_enable = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0; 7117 state->optc[i].otg_interlace_enable = timing->flags.INTERLACE ? 1 : 0; 7118 state->optc[i].otg_clock_enable = 1; /* OTG clock enabled */ 7119 state->optc[i].vtg0_enable = 1; /* VTG enabled for timing generation */ 7120 7121 /* Initialize other key fields to defaults */ 7122 state->optc[i].optc_input_pix_clk_en = 1; 7123 state->optc[i].optc_segment_width = (pipe_ctx->next_odm_pipe) ? (timing->h_addressable / 2) : timing->h_addressable; 7124 state->optc[i].otg_vready_offset = 1; 7125 state->optc[i].otg_vstartup_start = timing->v_addressable + 10; 7126 state->optc[i].otg_vupdate_offset = 0; 7127 state->optc[i].otg_vupdate_width = 5; 7128 } else { 7129 /* No timing generator resource - initialize all fields to 0 */ 7130 memset(&state->optc[i], 0, sizeof(state->optc[i])); 7131 } 7132 } 7133 7134 state->state_valid = true; 7135 return true; 7136 } 7137 7138 void dc_log_preos_dmcub_info(const struct dc *dc) 7139 { 7140 dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv); 7141 } 7142 7143 bool dc_get_qos_info(struct dc *dc, struct dc_qos_info *info) 7144 { 7145 const struct dc_clocks *clk = &dc->current_state->bw_ctx.bw.dcn.clk; 7146 struct memory_qos qos; 7147 7148 memset(info, 0, sizeof(*info)); 7149 7150 // Check if measurement function is available 7151 if (!dc->hwss.measure_memory_qos) { 7152 return false; 7153 } 7154 7155 // Call unified measurement function 7156 dc->hwss.measure_memory_qos(dc, &qos); 7157 7158 // Populate info from measured qos 7159 info->actual_peak_bw_in_mbps = qos.peak_bw_mbps; 7160 info->actual_avg_bw_in_mbps = qos.avg_bw_mbps; 7161 info->actual_min_latency_in_ns = qos.min_latency_ns; 7162 info->actual_max_latency_in_ns = qos.max_latency_ns; 7163 info->actual_avg_latency_in_ns = qos.avg_latency_ns; 7164 info->dcn_bandwidth_ub_in_mbps = (uint32_t)(clk->fclk_khz / 1000 * 64); 7165 7166 return true; 7167 } 7168 7169 enum update_v3_flow { 7170 UPDATE_V3_FLOW_INVALID, 7171 UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST, 7172 UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL, 7173 UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS, 7174 UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW, 7175 UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT, 7176 }; 7177 7178 struct dc_update_scratch_space { 7179 struct dc *dc; 7180 struct dc_surface_update *surface_updates; 7181 int surface_count; 7182 struct dc_stream_state *stream; 7183 struct dc_stream_update *stream_update; 7184 bool update_v3; 7185 bool do_clear_update_flags; 7186 enum surface_update_type update_type; 7187 struct dc_state *new_context; 7188 enum update_v3_flow flow; 7189 struct dc_state *backup_context; 7190 struct dc_state *intermediate_context; 7191 struct pipe_split_policy_backup intermediate_policy; 7192 struct dc_surface_update intermediate_updates[MAX_SURFACES]; 7193 int 
intermediate_count; 7194 }; 7195 7196 size_t dc_update_scratch_space_size(void) 7197 { 7198 return sizeof(struct dc_update_scratch_space); 7199 } 7200 7201 static bool update_planes_and_stream_prepare_v2( 7202 struct dc_update_scratch_space *scratch 7203 ) 7204 { 7205 // v2 is too tangled to break into stages, so just execute everything under lock 7206 dc_exit_ips_for_hw_access(scratch->dc); 7207 return update_planes_and_stream_v2( 7208 scratch->dc, 7209 scratch->surface_updates, 7210 scratch->surface_count, 7211 scratch->stream, 7212 scratch->stream_update 7213 ); 7214 } 7215 7216 static void update_planes_and_stream_execute_v2( 7217 const struct dc_update_scratch_space *scratch 7218 ) 7219 { 7220 // Nothing to do, see `update_planes_and_stream_prepare_v2` 7221 (void) scratch; 7222 } 7223 7224 static bool update_planes_and_stream_cleanup_v2( 7225 const struct dc_update_scratch_space *scratch 7226 ) 7227 { 7228 if (scratch->do_clear_update_flags) 7229 clear_update_flags(scratch->surface_updates, scratch->surface_count, scratch->stream); 7230 7231 return false; 7232 } 7233 7234 static void update_planes_and_stream_cleanup_v3_release_minimal( 7235 struct dc_update_scratch_space *scratch, 7236 bool backup 7237 ); 7238 7239 static bool update_planes_and_stream_prepare_v3_intermediate_seamless( 7240 struct dc_update_scratch_space *scratch 7241 ) 7242 { 7243 return is_pipe_topology_transition_seamless_with_intermediate_step( 7244 scratch->dc, 7245 scratch->dc->current_state, 7246 scratch->intermediate_context, 7247 scratch->new_context 7248 ); 7249 } 7250 7251 static void transition_countdown_init(struct dc *dc) 7252 { 7253 dc->check_config.transition_countdown_to_steady_state = 7254 dc->debug.num_fast_flips_to_steady_state_override ? 7255 dc->debug.num_fast_flips_to_steady_state_override : 7256 NUM_FAST_FLIPS_TO_STEADY_STATE; 7257 } 7258 7259 static bool update_planes_and_stream_prepare_v3( 7260 struct dc_update_scratch_space *scratch 7261 ) 7262 { 7263 if (scratch->flow == UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS) { 7264 return true; 7265 } 7266 ASSERT(scratch->flow == UPDATE_V3_FLOW_INVALID); 7267 dc_exit_ips_for_hw_access(scratch->dc); 7268 7269 if (!update_planes_and_stream_state( 7270 scratch->dc, 7271 scratch->surface_updates, 7272 scratch->surface_count, 7273 scratch->stream, 7274 scratch->stream_update, 7275 &scratch->update_type, 7276 &scratch->new_context 7277 )) { 7278 return false; 7279 } 7280 7281 if (scratch->new_context == scratch->dc->current_state) { 7282 ASSERT(scratch->update_type < UPDATE_TYPE_FULL); 7283 7284 // TODO: Do we need this to be alive in execute? 7285 struct dc_fast_update fast_update[MAX_SURFACES] = { 0 }; 7286 7287 populate_fast_updates( 7288 fast_update, 7289 scratch->surface_updates, 7290 scratch->surface_count, 7291 scratch->stream_update 7292 ); 7293 const bool fast = fast_update_only( 7294 scratch->dc, 7295 fast_update, 7296 scratch->surface_updates, 7297 scratch->surface_count, 7298 scratch->stream_update, 7299 scratch->stream 7300 ) 7301 // TODO: Can this be used to skip `populate_fast_updates`? 7302 && !scratch->dc->check_config.enable_legacy_fast_update; 7303 scratch->flow = fast 7304 ? 
UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST 7305 : UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL; 7306 return true; 7307 } 7308 7309 ASSERT(scratch->update_type >= UPDATE_TYPE_FULL); 7310 7311 const bool seamless = scratch->dc->hwss.is_pipe_topology_transition_seamless( 7312 scratch->dc, 7313 scratch->dc->current_state, 7314 scratch->new_context 7315 ); 7316 if (seamless) { 7317 scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS; 7318 if (scratch->dc->check_config.deferred_transition_state) 7319 /* reset countdown as steady state not reached */ 7320 transition_countdown_init(scratch->dc); 7321 return true; 7322 } 7323 7324 if (!scratch->dc->debug.disable_deferred_minimal_transitions) { 7325 scratch->dc->check_config.deferred_transition_state = true; 7326 transition_countdown_init(scratch->dc); 7327 } 7328 7329 scratch->intermediate_context = create_minimal_transition_state( 7330 scratch->dc, 7331 scratch->new_context, 7332 &scratch->intermediate_policy 7333 ); 7334 if (scratch->intermediate_context) { 7335 if (update_planes_and_stream_prepare_v3_intermediate_seamless(scratch)) { 7336 scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW; 7337 return true; 7338 } 7339 7340 update_planes_and_stream_cleanup_v3_release_minimal(scratch, false); 7341 } 7342 7343 scratch->backup_context = scratch->dc->current_state; 7344 restore_planes_and_stream_state(&scratch->dc->scratch.current_state, scratch->stream); 7345 dc_state_retain(scratch->backup_context); 7346 scratch->intermediate_context = create_minimal_transition_state( 7347 scratch->dc, 7348 scratch->backup_context, 7349 &scratch->intermediate_policy 7350 ); 7351 if (scratch->intermediate_context) { 7352 if (update_planes_and_stream_prepare_v3_intermediate_seamless(scratch)) { 7353 scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT; 7354 scratch->intermediate_count = initialize_empty_surface_updates( 7355 scratch->stream, scratch->intermediate_updates 7356 ); 7357 return true; 7358 } 7359 7360 update_planes_and_stream_cleanup_v3_release_minimal(scratch, true); 7361 } 7362 7363 scratch->flow = UPDATE_V3_FLOW_INVALID; 7364 dc_state_release(scratch->backup_context); 7365 restore_planes_and_stream_state(&scratch->dc->scratch.new_state, scratch->stream); 7366 return false; 7367 } 7368 7369 static void update_planes_and_stream_execute_v3_commit( 7370 const struct dc_update_scratch_space *scratch, 7371 bool intermediate_update, 7372 bool intermediate_context, 7373 bool use_stream_update 7374 ) 7375 { 7376 commit_planes_for_stream( 7377 scratch->dc, 7378 intermediate_update ? scratch->intermediate_updates : scratch->surface_updates, 7379 intermediate_update ? scratch->intermediate_count : scratch->surface_count, 7380 scratch->stream, 7381 use_stream_update ? scratch->stream_update : NULL, 7382 intermediate_context ? UPDATE_TYPE_FULL : scratch->update_type, 7383 // `dc->current_state` only used in `NO_NEW_CONTEXT`, where it is equal to `new_context` 7384 intermediate_context ? 
scratch->intermediate_context : scratch->new_context 7385 ); 7386 } 7387 7388 static void update_planes_and_stream_execute_v3( 7389 const struct dc_update_scratch_space *scratch 7390 ) 7391 { 7392 switch (scratch->flow) { 7393 case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST: 7394 commit_planes_for_stream_fast( 7395 scratch->dc, 7396 scratch->surface_updates, 7397 scratch->surface_count, 7398 scratch->stream, 7399 scratch->stream_update, 7400 scratch->update_type, 7401 scratch->new_context 7402 ); 7403 break; 7404 7405 case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL: 7406 case UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS: 7407 update_planes_and_stream_execute_v3_commit(scratch, false, false, true); 7408 break; 7409 7410 case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW: 7411 update_planes_and_stream_execute_v3_commit(scratch, false, true, 7412 scratch->dc->check_config.deferred_transition_state); 7413 break; 7414 7415 case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT: 7416 update_planes_and_stream_execute_v3_commit(scratch, true, true, false); 7417 break; 7418 7419 case UPDATE_V3_FLOW_INVALID: 7420 default: 7421 ASSERT(false); 7422 } 7423 } 7424 7425 static void update_planes_and_stream_cleanup_v3_release_minimal( 7426 struct dc_update_scratch_space *scratch, 7427 bool backup 7428 ) 7429 { 7430 release_minimal_transition_state( 7431 scratch->dc, 7432 scratch->intermediate_context, 7433 backup ? scratch->backup_context : scratch->new_context, 7434 &scratch->intermediate_policy 7435 ); 7436 } 7437 7438 static void update_planes_and_stream_cleanup_v3_intermediate( 7439 struct dc_update_scratch_space *scratch, 7440 bool backup 7441 ) 7442 { 7443 swap_and_release_current_context(scratch->dc, scratch->intermediate_context, scratch->stream); 7444 dc_state_retain(scratch->dc->current_state); 7445 update_planes_and_stream_cleanup_v3_release_minimal(scratch, backup); 7446 } 7447 7448 static bool update_planes_and_stream_cleanup_v3( 7449 struct dc_update_scratch_space *scratch 7450 ) 7451 { 7452 switch (scratch->flow) { 7453 case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST: 7454 case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL: 7455 if (scratch->dc->check_config.transition_countdown_to_steady_state) 7456 scratch->dc->check_config.transition_countdown_to_steady_state--; 7457 break; 7458 7459 case UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS: 7460 swap_and_release_current_context(scratch->dc, scratch->new_context, scratch->stream); 7461 break; 7462 7463 case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW: 7464 update_planes_and_stream_cleanup_v3_intermediate(scratch, false); 7465 if (scratch->dc->check_config.deferred_transition_state) { 7466 dc_state_release(scratch->new_context); 7467 } else { 7468 scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS; 7469 return true; 7470 } 7471 break; 7472 7473 case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT: 7474 update_planes_and_stream_cleanup_v3_intermediate(scratch, true); 7475 dc_state_release(scratch->backup_context); 7476 restore_planes_and_stream_state(&scratch->dc->scratch.new_state, scratch->stream); 7477 scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS; 7478 return true; 7479 7480 case UPDATE_V3_FLOW_INVALID: 7481 default: 7482 ASSERT(false); 7483 } 7484 7485 if (scratch->do_clear_update_flags) 7486 clear_update_flags(scratch->surface_updates, scratch->surface_count, scratch->stream); 7487 7488 return false; 7489 } 7490 7491 struct dc_update_scratch_space *dc_update_planes_and_stream_init( 7492 struct dc *dc, 7493 struct dc_surface_update *surface_updates, 7494 int surface_count, 7495 
struct dc_stream_state *stream, 7496 struct dc_stream_update *stream_update 7497 ) 7498 { 7499 const enum dce_version version = dc->ctx->dce_version; 7500 struct dc_update_scratch_space *scratch = stream->update_scratch; 7501 7502 *scratch = (struct dc_update_scratch_space){ 7503 .dc = dc, 7504 .surface_updates = surface_updates, 7505 .surface_count = surface_count, 7506 .stream = stream, 7507 .stream_update = stream_update, 7508 .update_v3 = version >= DCN_VERSION_4_01 || version == DCN_VERSION_3_2 || version == DCN_VERSION_3_21, 7509 .do_clear_update_flags = version >= DCN_VERSION_1_0, 7510 }; 7511 7512 return scratch; 7513 } 7514 7515 bool dc_update_planes_and_stream_prepare( 7516 struct dc_update_scratch_space *scratch 7517 ) 7518 { 7519 return scratch->update_v3 7520 ? update_planes_and_stream_prepare_v3(scratch) 7521 : update_planes_and_stream_prepare_v2(scratch); 7522 } 7523 7524 void dc_update_planes_and_stream_execute( 7525 const struct dc_update_scratch_space *scratch 7526 ) 7527 { 7528 scratch->update_v3 7529 ? update_planes_and_stream_execute_v3(scratch) 7530 : update_planes_and_stream_execute_v2(scratch); 7531 } 7532 7533 bool dc_update_planes_and_stream_cleanup( 7534 struct dc_update_scratch_space *scratch 7535 ) 7536 { 7537 return scratch->update_v3 7538 ? update_planes_and_stream_cleanup_v3(scratch) 7539 : update_planes_and_stream_cleanup_v2(scratch); 7540 } 7541 7542
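/*
 * Illustrative example (not built): a sketch of how a DM-side caller might
 * drive the staged update entry points above. This is an assumption about
 * usage, not an existing DC or DM function. The scratch space is expected to
 * be a per-stream buffer of dc_update_scratch_space_size() bytes reachable
 * through stream->update_scratch; any serialization around the execute step
 * is the caller's responsibility (the v2 path performs all of its work in
 * prepare, see update_planes_and_stream_prepare_v2). A true return from
 * dc_update_planes_and_stream_cleanup() means a minimal transition was
 * committed and the execute/cleanup pair must run again to program the
 * final context.
 */
#if 0
static bool example_commit_planes_and_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update)
{
	struct dc_update_scratch_space *scratch;

	scratch = dc_update_planes_and_stream_init(dc, srf_updates,
			surface_count, stream, stream_update);

	if (!dc_update_planes_and_stream_prepare(scratch))
		return false;

	do {
		/* caller-provided locking around HW programming goes here */
		dc_update_planes_and_stream_execute(scratch);
	} while (dc_update_planes_and_stream_cleanup(scratch));

	return true;
}
#endif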