1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
21 * 22 * Authors: AMD 23 */ 24 25 #include "dm_services.h" 26 27 #include "amdgpu.h" 28 29 #include "dc.h" 30 31 #include "core_status.h" 32 #include "core_types.h" 33 #include "hw_sequencer.h" 34 #include "dce/dce_hwseq.h" 35 36 #include "resource.h" 37 #include "dc_state.h" 38 #include "dc_state_priv.h" 39 #include "dc_plane.h" 40 #include "dc_plane_priv.h" 41 #include "dc_stream_priv.h" 42 43 #include "gpio_service_interface.h" 44 #include "clk_mgr.h" 45 #include "clock_source.h" 46 #include "dc_bios_types.h" 47 48 #include "bios_parser_interface.h" 49 #include "bios/bios_parser_helper.h" 50 #include "include/irq_service_interface.h" 51 #include "transform.h" 52 #include "dmcu.h" 53 #include "dpp.h" 54 #include "timing_generator.h" 55 #include "abm.h" 56 #include "dio/virtual/virtual_link_encoder.h" 57 #include "hubp.h" 58 59 #include "link_hwss.h" 60 #include "link_encoder.h" 61 #include "link_enc_cfg.h" 62 63 #include "link_service.h" 64 #include "dm_helpers.h" 65 #include "mem_input.h" 66 67 #include "dc_dmub_srv.h" 68 69 #include "dsc.h" 70 71 #include "vm_helper.h" 72 73 #include "dce/dce_i2c.h" 74 75 #include "dmub/dmub_srv.h" 76 77 #include "dce/dmub_psr.h" 78 79 #include "dce/dmub_hw_lock_mgr.h" 80 81 #include "dc_trace.h" 82 83 #include "hw_sequencer_private.h" 84 85 #if defined(CONFIG_DRM_AMD_DC_FP) 86 #include "dml2_0/dml2_internal_types.h" 87 #include "soc_and_ip_translator.h" 88 #endif 89 90 #include "dce/dmub_outbox.h" 91 92 #define CTX \ 93 dc->ctx 94 95 #define DC_LOGGER \ 96 dc->ctx->logger 97 98 static const char DC_BUILD_ID[] = "production-build"; 99 100 /** 101 * DOC: Overview 102 * 103 * DC is the OS-agnostic component of the amdgpu DC driver. 104 * 105 * DC maintains and validates a set of structs representing the state of the 106 * driver and writes that state to AMD hardware 107 * 108 * Main DC HW structs: 109 * 110 * struct dc - The central struct. One per driver. Created on driver load, 111 * destroyed on driver unload. 
112 * 113 * struct dc_context - One per driver. 114 * Used as a backpointer by most other structs in dc. 115 * 116 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP 117 * plugpoints). Created on driver load, destroyed on driver unload. 118 * 119 * struct dc_sink - One per display. Created on boot or hotplug. 120 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink 121 * (the display directly attached). It may also have one or more remote 122 * sinks (in the Multi-Stream Transport case) 123 * 124 * struct resource_pool - One per driver. Represents the hw blocks not in the 125 * main pipeline. Not directly accessible by dm. 126 * 127 * Main dc state structs: 128 * 129 * These structs can be created and destroyed as needed. There is a full set of 130 * these structs in dc->current_state representing the currently programmed state. 131 * 132 * struct dc_state - The global DC state to track global state information, 133 * such as bandwidth values. 134 * 135 * struct dc_stream_state - Represents the hw configuration for the pipeline from 136 * a framebuffer to a display. Maps one-to-one with dc_sink. 137 * 138 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, 139 * and may have more in the Multi-Plane Overlay case. 140 * 141 * struct resource_context - Represents the programmable state of everything in 142 * the resource_pool. Not directly accessible by dm. 143 * 144 * struct pipe_ctx - A member of struct resource_context. Represents the 145 * internal hardware pipeline components. Each dc_plane_state has either 146 * one or two (in the pipe-split case). 
147 */ 148 149 /* Private functions */ 150 151 static inline void elevate_update_type( 152 struct surface_update_descriptor *descriptor, 153 enum surface_update_type new_type, 154 enum dc_lock_descriptor new_locks 155 ) 156 { 157 if (new_type > descriptor->update_type) 158 descriptor->update_type = new_type; 159 160 descriptor->lock_descriptor |= new_locks; 161 } 162 163 static void destroy_links(struct dc *dc) 164 { 165 uint32_t i; 166 167 for (i = 0; i < dc->link_count; i++) { 168 if (NULL != dc->links[i]) 169 dc->link_srv->destroy_link(&dc->links[i]); 170 } 171 } 172 173 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links) 174 { 175 int i; 176 uint32_t count = 0; 177 178 for (i = 0; i < num_links; i++) { 179 if (links[i]->connector_signal == SIGNAL_TYPE_EDP || 180 links[i]->is_internal_display) 181 count++; 182 } 183 184 return count; 185 } 186 187 static int get_seamless_boot_stream_count(struct dc_state *ctx) 188 { 189 uint8_t i; 190 uint8_t seamless_boot_stream_count = 0; 191 192 for (i = 0; i < ctx->stream_count; i++) 193 if (ctx->streams[i]->apply_seamless_boot_optimization) 194 seamless_boot_stream_count++; 195 196 return seamless_boot_stream_count; 197 } 198 199 static bool create_links( 200 struct dc *dc, 201 uint32_t num_virtual_links) 202 { 203 int i; 204 int connectors_num; 205 struct dc_bios *bios = dc->ctx->dc_bios; 206 207 dc->link_count = 0; 208 209 connectors_num = bios->funcs->get_connectors_number(bios); 210 211 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num); 212 213 if (connectors_num > ENUM_ID_COUNT) { 214 dm_error( 215 "DC: Number of connectors %d exceeds maximum of %d!\n", 216 connectors_num, 217 ENUM_ID_COUNT); 218 return false; 219 } 220 221 dm_output_to_console( 222 "DC: %s: connectors_num: physical:%d, virtual:%d\n", 223 __func__, 224 connectors_num, 225 num_virtual_links); 226 227 /* When getting the number of connectors, the VBIOS reports the number of valid indices, 228 * but 
it doesn't say which indices are valid, and not every index has an actual connector. 229 * So, if we don't find a connector on an index, that is not an error. 230 * 231 * - There is no guarantee that the first N indices will be valid 232 * - VBIOS may report a higher amount of valid indices than there are actual connectors 233 * - Some VBIOS have valid configurations for more connectors than there actually are 234 * on the card. This may be because the manufacturer used the same VBIOS for different 235 * variants of the same card. 236 */ 237 for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { 238 struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i); 239 struct link_init_data link_init_params = {0}; 240 struct dc_link *link; 241 242 if (connector_id.id == CONNECTOR_ID_UNKNOWN) 243 continue; 244 245 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); 246 247 link_init_params.ctx = dc->ctx; 248 /* next BIOS object table connector */ 249 link_init_params.connector_index = i; 250 link_init_params.link_index = dc->link_count; 251 link_init_params.dc = dc; 252 link = dc->link_srv->create_link(&link_init_params); 253 254 if (link) { 255 dc->links[dc->link_count] = link; 256 link->dc = dc; 257 ++dc->link_count; 258 } 259 } 260 261 DC_LOG_DC("BIOS object table - end"); 262 263 /* Create a link for each usb4 dpia port */ 264 dc->lowest_dpia_link_index = MAX_LINKS; 265 for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { 266 struct link_init_data link_init_params = {0}; 267 struct dc_link *link; 268 269 link_init_params.ctx = dc->ctx; 270 link_init_params.connector_index = i; 271 link_init_params.link_index = dc->link_count; 272 link_init_params.dc = dc; 273 link_init_params.is_dpia_link = true; 274 275 link = dc->link_srv->create_link(&link_init_params); 276 if (link) { 277 if (dc->lowest_dpia_link_index > dc->link_count) 278 dc->lowest_dpia_link_index = 
dc->link_count; 279 280 dc->links[dc->link_count] = link; 281 link->dc = dc; 282 ++dc->link_count; 283 } 284 } 285 286 for (i = 0; i < num_virtual_links; i++) { 287 struct dc_link *link = kzalloc_obj(*link); 288 struct encoder_init_data enc_init = {0}; 289 290 if (link == NULL) { 291 BREAK_TO_DEBUGGER(); 292 goto failed_alloc; 293 } 294 295 link->link_index = dc->link_count; 296 dc->links[dc->link_count] = link; 297 dc->link_count++; 298 299 link->ctx = dc->ctx; 300 link->dc = dc; 301 link->connector_signal = SIGNAL_TYPE_VIRTUAL; 302 link->link_id.type = OBJECT_TYPE_CONNECTOR; 303 link->link_id.id = CONNECTOR_ID_VIRTUAL; 304 link->link_id.enum_id = ENUM_ID_1; 305 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; 306 link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED; 307 link->link_enc = kzalloc_obj(*link->link_enc); 308 309 if (!link->link_enc) { 310 BREAK_TO_DEBUGGER(); 311 goto failed_alloc; 312 } 313 314 link->link_status.dpcd_caps = &link->dpcd_caps; 315 316 enc_init.ctx = dc->ctx; 317 enc_init.channel = CHANNEL_ID_UNKNOWN; 318 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; 319 enc_init.transmitter = TRANSMITTER_UNKNOWN; 320 enc_init.connector = link->link_id; 321 enc_init.encoder.type = OBJECT_TYPE_ENCODER; 322 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; 323 enc_init.encoder.enum_id = ENUM_ID_1; 324 virtual_link_encoder_construct(link->link_enc, &enc_init); 325 } 326 327 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count); 328 329 return true; 330 331 failed_alloc: 332 return false; 333 } 334 335 /* Create additional DIG link encoder objects if fewer than the platform 336 * supports were created during link construction. This can happen if the 337 * number of physical connectors is less than the number of DIGs. 
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			/* Only fill slots left empty during link construction;
			 * skip when the pool offers no minimal-create hook.
			 */
			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					/* Keep going; report partial failure. */
					res = false;
				}
			}
		}
	}

	return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	/* Pool may already be gone on teardown paths. */
	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

/* Allocate a zeroed perf-trace object; returns NULL on allocation failure. */
static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc_obj(struct dc_perf_trace);
}

/* Free *perf_trace and NULL the caller's pointer to guard against reuse. */
static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/* Apply a long-vtotal (extended VBLANK) adjustment to the pipe driving
 * @stream via the hwss hook, if present. Returns true once the stream's
 * pipe is found, even if the hook is absent (the adjustment is then a
 * no-op) — NOTE(review): looks intentional, confirm with callers.
 */
static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
{
	if (!dc || !stream || !adjust)
		return false;

	if (!dc->current_state)
		return false;

	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			if (dc->hwss.set_long_vtotal)
				dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);

			return true;
		}
	}

	return false;
}

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [in] new refresh_rate
 *
 * Return: %true if the pipe context is found and there is an associated
 *         timing_generator for the DC;
 *         %false if the pipe context is not found or there is no
 *         timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Build and fire a DMUB secure-display command for a single CRC region of
 * interest (ROI). @is_stop selects stop-update vs. window-notify; the ROI
 * coordinates are only sent for the notify case.
 */
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

	if (is_stop) {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
	}

	/* Fire-and-forget: no wait for DMUB completion. */
	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

/* Legacy DMCU path for the same CRC-window forwarding. */
static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	if (is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	else
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

/* Forward a CRC window for @stream to firmware, preferring DMUB and falling
 * back to an initialized DMCU. Returns false if the stream has no OTG-master
 * pipe or no capable firmware path exists.
 */
bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, uint8_t phy_id, bool is_stop)
{
	struct dmcu *dmcu;
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	/* Find the OTG-master pipe (no top pipe, no preceding ODM pipe). */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = phy_id;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
	if (dmub_srv)
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
	else
		return false;

	return true;
}

/* Multi-window variant of the DMUB secure-display command: sends all
 * MAX_CRC_WINDOW_NUM ROIs (with per-window enable bits) in one command.
 */
static void
dc_stream_forward_dmub_multiple_crc_window(struct dc_dmub_srv *dmub_srv,
		struct crc_window *window, struct otg_phy_mux *mux_mapping, bool stop)
{
	int i;
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.mul_roi_ctl.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.mul_roi_ctl.otg_id = mux_mapping->otg_output_num;

	cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;

	if (stop) {
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY;
		for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_start = window[i].rect.x;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_start = window[i].rect.y;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_end = window[i].rect.x + window[i].rect.width;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_end = window[i].rect.y + window[i].rect.height;
			cmd.secure_display.mul_roi_ctl.roi_ctl[i].enable = window[i].enable;
		}
	}

	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

/* Forward multiple CRC windows for @stream. DMUB only — there is no DMCU
 * fallback for the multi-window path. @window is assumed to hold at least
 * MAX_CRC_WINDOW_NUM entries — TODO confirm against callers.
 */
bool
dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
		struct crc_window *window, uint8_t phy_id, bool stop)
{
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = phy_id;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub only. no dmcu support*/
	if (dmub_srv)
		dc_stream_forward_dmub_multiple_crc_window(dmub_srv, window, &mux_mapping, stop);
	else
		return false;

	return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @crc_window: CRC window (x/y start/end) information
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 * @idx: Capture CRC on which CRC engine instance
 * @reset: Reset CRC engine before the configuration
 * @crc_poly_mode: CRC polynomial mode
 *
 * By default, the entire frame is used to calculate the CRC.
638 * 639 * Return: %false if the stream is not found or CRC capture is not supported; 640 * %true if the stream has been configured. 641 */ 642 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, 643 struct crc_params *crc_window, bool enable, bool continuous, 644 uint8_t idx, bool reset, enum crc_poly_mode crc_poly_mode) 645 { 646 struct pipe_ctx *pipe; 647 struct crc_params param; 648 struct timing_generator *tg; 649 650 pipe = resource_get_otg_master_for_stream( 651 &dc->current_state->res_ctx, stream); 652 653 /* Stream not found */ 654 if (pipe == NULL) 655 return false; 656 657 dc_exit_ips_for_hw_access(dc); 658 659 /* By default, capture the full frame */ 660 param.windowa_x_start = 0; 661 param.windowa_y_start = 0; 662 param.windowa_x_end = pipe->stream->timing.h_addressable; 663 param.windowa_y_end = pipe->stream->timing.v_addressable; 664 param.windowb_x_start = 0; 665 param.windowb_y_start = 0; 666 param.windowb_x_end = pipe->stream->timing.h_addressable; 667 param.windowb_y_end = pipe->stream->timing.v_addressable; 668 param.crc_poly_mode = crc_poly_mode; 669 670 if (crc_window) { 671 param.windowa_x_start = crc_window->windowa_x_start; 672 param.windowa_y_start = crc_window->windowa_y_start; 673 param.windowa_x_end = crc_window->windowa_x_end; 674 param.windowa_y_end = crc_window->windowa_y_end; 675 param.windowb_x_start = crc_window->windowb_x_start; 676 param.windowb_y_start = crc_window->windowb_y_start; 677 param.windowb_x_end = crc_window->windowb_x_end; 678 param.windowb_y_end = crc_window->windowb_y_end; 679 } 680 681 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; 682 param.odm_mode = pipe->next_odm_pipe ? 
1:0; 683 684 /* Default to the union of both windows */ 685 param.selection = UNION_WINDOW_A_B; 686 param.continuous_mode = continuous; 687 param.enable = enable; 688 689 param.crc_eng_inst = idx; 690 param.reset = reset; 691 692 tg = pipe->stream_res.tg; 693 694 /* Only call if supported */ 695 if (tg->funcs->configure_crc) 696 return tg->funcs->configure_crc(tg, ¶m); 697 DC_LOG_WARNING("CRC capture not supported."); 698 return false; 699 } 700 701 /** 702 * dc_stream_get_crc() - Get CRC values for the given stream. 703 * 704 * @dc: DC object. 705 * @stream: The DC stream state of the stream to get CRCs from. 706 * @idx: index of crc engine to get CRC from 707 * @r_cr: CRC value for the red component. 708 * @g_y: CRC value for the green component. 709 * @b_cb: CRC value for the blue component. 710 * 711 * dc_stream_configure_crc needs to be called beforehand to enable CRCs. 712 * 713 * Return: 714 * %false if stream is not found, or if CRCs are not enabled. 715 */ 716 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx, 717 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) 718 { 719 int i; 720 struct pipe_ctx *pipe = NULL; 721 struct timing_generator *tg; 722 723 dc_exit_ips_for_hw_access(dc); 724 725 for (i = 0; i < MAX_PIPES; i++) { 726 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 727 if (pipe->stream == stream) 728 break; 729 } 730 /* Stream not found */ 731 if (i == MAX_PIPES) 732 return false; 733 734 tg = pipe->stream_res.tg; 735 736 if (tg->funcs->get_crc) 737 return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb); 738 DC_LOG_WARNING("CRC capture not supported."); 739 return false; 740 } 741 742 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, 743 enum dc_dynamic_expansion option) 744 { 745 /* OPP FMT dyn expansion updates*/ 746 int i; 747 struct pipe_ctx *pipe_ctx; 748 749 dc_exit_ips_for_hw_access(dc); 750 751 for (i = 0; i < MAX_PIPES; i++) { 752 if (dc->current_state->res_ctx.pipe_ctx[i].stream 
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

/* Program the dither option for @stream: rebuilds bit-depth-reduction params
 * and pushes them to the transform block (if present) and the OPP.
 * Silently returns when the stream has no pipe or @option is out of range.
 */
void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	dc_exit_ips_for_hw_access(stream->ctx->dc);

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	/* Transform block is optional; program pixel storage depth when it
	 * exists and exposes the hook.
	 */
	if (pipes->plane_res.xfm &&
			pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

/* Re-program the gamut remap on every pipe driving @stream.
 * Returns true if at least one pipe was programmed.
 */
bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

/* Program the output CSC matrix on every pipe driving @stream.
 * Returns true if at least one pipe was programmed.
 */
bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

/* Collect every pipe bound to any stream in @streams and apply the static
 * screen control parameters to all of them in one hwss call.
 */
void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

/* Tear down everything dc_construct() built, in reverse dependency order.
 * Safe to call on a partially constructed dc (every member is NULL-checked
 * or free()-tolerant of NULL).
 */
static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign &&
			!dc->config.unify_link_enc_assignment)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_state_release(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);
#ifdef CONFIG_DRM_AMD_DC_FP
	dc_destroy_soc_and_ip_translator(&dc->soc_and_ip_translator);
#endif
	if (dc->link_srv)
		link_destroy_link_service(&dc->link_srv);

	if (dc->ctx) {
		if
(dc->ctx->gpio_service) 906 dal_gpio_service_destroy(&dc->ctx->gpio_service); 907 908 if (dc->ctx->created_bios) 909 dal_bios_parser_destroy(&dc->ctx->dc_bios); 910 kfree(dc->ctx->logger); 911 dc_perf_trace_destroy(&dc->ctx->perf_trace); 912 913 kfree(dc->ctx); 914 dc->ctx = NULL; 915 } 916 917 kfree(dc->bw_vbios); 918 dc->bw_vbios = NULL; 919 920 kfree(dc->bw_dceip); 921 dc->bw_dceip = NULL; 922 923 kfree(dc->dcn_soc); 924 dc->dcn_soc = NULL; 925 926 kfree(dc->dcn_ip); 927 dc->dcn_ip = NULL; 928 929 kfree(dc->vm_helper); 930 dc->vm_helper = NULL; 931 932 } 933 934 static bool dc_construct_ctx(struct dc *dc, 935 const struct dc_init_data *init_params) 936 { 937 struct dc_context *dc_ctx; 938 939 dc_ctx = kzalloc_obj(*dc_ctx); 940 if (!dc_ctx) 941 return false; 942 943 dc_stream_init_rmcm_3dlut(dc); 944 945 dc_ctx->cgs_device = init_params->cgs_device; 946 dc_ctx->driver_context = init_params->driver; 947 dc_ctx->dc = dc; 948 dc_ctx->asic_id = init_params->asic_id; 949 dc_ctx->dc_sink_id_count = 0; 950 dc_ctx->dc_stream_id_count = 0; 951 dc_ctx->dce_environment = init_params->dce_environment; 952 dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets; 953 dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets; 954 dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets; 955 956 /* Create logger */ 957 dc_ctx->logger = kmalloc_obj(*dc_ctx->logger); 958 959 if (!dc_ctx->logger) { 960 kfree(dc_ctx); 961 return false; 962 } 963 964 dc_ctx->logger->dev = adev_to_drm(init_params->driver); 965 dc->dml.logger = dc_ctx->logger; 966 967 dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id); 968 969 dc_ctx->perf_trace = dc_perf_trace_create(); 970 if (!dc_ctx->perf_trace) { 971 kfree(dc_ctx); 972 ASSERT_CRITICAL(false); 973 return false; 974 } 975 976 dc->ctx = dc_ctx; 977 978 dc->link_srv = link_create_link_service(); 979 if (!dc->link_srv) 980 return false; 981 982 return true; 983 } 984 985 static bool dc_construct(struct dc *dc, 986 const struct dc_init_data 
*init_params) 987 { 988 struct dc_context *dc_ctx; 989 struct bw_calcs_dceip *dc_dceip; 990 struct bw_calcs_vbios *dc_vbios; 991 struct dcn_soc_bounding_box *dcn_soc; 992 struct dcn_ip_params *dcn_ip; 993 994 dc->config = init_params->flags; 995 996 // Allocate memory for the vm_helper 997 dc->vm_helper = kzalloc_obj(struct vm_helper); 998 if (!dc->vm_helper) { 999 dm_error("%s: failed to create dc->vm_helper\n", __func__); 1000 goto fail; 1001 } 1002 1003 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); 1004 1005 dc_dceip = kzalloc_obj(*dc_dceip); 1006 if (!dc_dceip) { 1007 dm_error("%s: failed to create dceip\n", __func__); 1008 goto fail; 1009 } 1010 1011 dc->bw_dceip = dc_dceip; 1012 1013 dc_vbios = kzalloc_obj(*dc_vbios); 1014 if (!dc_vbios) { 1015 dm_error("%s: failed to create vbios\n", __func__); 1016 goto fail; 1017 } 1018 1019 dc->bw_vbios = dc_vbios; 1020 dcn_soc = kzalloc_obj(*dcn_soc); 1021 if (!dcn_soc) { 1022 dm_error("%s: failed to create dcn_soc\n", __func__); 1023 goto fail; 1024 } 1025 1026 dc->dcn_soc = dcn_soc; 1027 1028 dcn_ip = kzalloc_obj(*dcn_ip); 1029 if (!dcn_ip) { 1030 dm_error("%s: failed to create dcn_ip\n", __func__); 1031 goto fail; 1032 } 1033 1034 dc->dcn_ip = dcn_ip; 1035 1036 if (init_params->bb_from_dmub) 1037 dc->dml2_options.bb_from_dmub = init_params->bb_from_dmub; 1038 else 1039 dc->dml2_options.bb_from_dmub = NULL; 1040 1041 if (!dc_construct_ctx(dc, init_params)) { 1042 dm_error("%s: failed to create ctx\n", __func__); 1043 goto fail; 1044 } 1045 1046 dc_ctx = dc->ctx; 1047 1048 /* Resource should construct all asic specific resources. 
1049 * This should be the only place where we need to parse the asic id 1050 */ 1051 if (init_params->vbios_override) 1052 dc_ctx->dc_bios = init_params->vbios_override; 1053 else { 1054 /* Create BIOS parser */ 1055 struct bp_init_data bp_init_data; 1056 1057 bp_init_data.ctx = dc_ctx; 1058 bp_init_data.bios = init_params->asic_id.atombios_base_address; 1059 1060 dc_ctx->dc_bios = dal_bios_parser_create( 1061 &bp_init_data, dc_ctx->dce_version); 1062 1063 if (!dc_ctx->dc_bios) { 1064 ASSERT_CRITICAL(false); 1065 goto fail; 1066 } 1067 1068 dc_ctx->created_bios = true; 1069 } 1070 1071 dc->vendor_signature = init_params->vendor_signature; 1072 1073 /* Create GPIO service */ 1074 dc_ctx->gpio_service = dal_gpio_service_create( 1075 dc_ctx->dce_version, 1076 dc_ctx->dce_environment, 1077 dc_ctx); 1078 1079 if (!dc_ctx->gpio_service) { 1080 ASSERT_CRITICAL(false); 1081 goto fail; 1082 } 1083 1084 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); 1085 if (!dc->res_pool) 1086 goto fail; 1087 1088 /* set i2c speed if not done by the respective dcnxxx__resource.c */ 1089 if (dc->caps.i2c_speed_in_khz_hdcp == 0) 1090 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; 1091 if (dc->check_config.max_optimizable_video_width == 0) 1092 dc->check_config.max_optimizable_video_width = 5120; 1093 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); 1094 if (!dc->clk_mgr) 1095 goto fail; 1096 #ifdef CONFIG_DRM_AMD_DC_FP 1097 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; 1098 1099 if (dc->res_pool->funcs->update_bw_bounding_box) 1100 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); 1101 dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version); 1102 if (!dc->soc_and_ip_translator) 1103 goto fail; 1104 #endif 1105 1106 if (!create_links(dc, init_params->num_virtual_links)) 1107 goto fail; 1108 1109 /* Create additional DIG link encoder objects 
if fewer than the platform 1110 * supports were created during link construction. 1111 */ 1112 if (!create_link_encoders(dc)) 1113 goto fail; 1114 1115 /* Creation of current_state must occur after dc->dml 1116 * is initialized in dc_create_resource_pool because 1117 * on creation it copies the contents of dc->dml 1118 */ 1119 dc->current_state = dc_state_create(dc, NULL); 1120 1121 if (!dc->current_state) { 1122 dm_error("%s: failed to create validate ctx\n", __func__); 1123 goto fail; 1124 } 1125 1126 return true; 1127 1128 fail: 1129 return false; 1130 } 1131 1132 static void disable_all_writeback_pipes_for_stream( 1133 const struct dc *dc, 1134 struct dc_stream_state *stream, 1135 struct dc_state *context) 1136 { 1137 (void)dc; 1138 (void)context; 1139 int i; 1140 1141 for (i = 0; i < stream->num_wb_info; i++) 1142 stream->writeback_info[i].wb_enabled = false; 1143 } 1144 1145 static void apply_ctx_interdependent_lock(struct dc *dc, 1146 struct dc_state *context, 1147 struct dc_stream_state *stream, 1148 bool lock) 1149 { 1150 (void)dc; 1151 (void)context; 1152 int i; 1153 1154 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ 1155 if (dc->hwss.interdependent_update_lock) 1156 dc->hwss.interdependent_update_lock(dc, context, lock); 1157 else { 1158 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1159 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1160 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 1161 1162 // Copied conditions that were previously in dce110_apply_ctx_for_surface 1163 if (stream == pipe_ctx->stream) { 1164 if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && 1165 (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) 1166 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); 1167 } 1168 } 1169 } 1170 } 1171 1172 static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) 1173 { 1174 if (dc->debug.visual_confirm & 
VISUAL_CONFIRM_EXPLICIT) {
		/* Explicit mode: take the color provided on the plane state verbatim. */
		memcpy(&pipe_ctx->visual_confirm_color, &pipe_ctx->plane_state->visual_confirm_color,
			sizeof(pipe_ctx->visual_confirm_color));
		return;
	}

	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

		if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
			get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
			get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
			get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR)
			get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_DCC)
			get_dcc_visual_confirm_color(dc, pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else {
			/* No DCN1.x mode matched: pre-DCN2 falls back to black. */
			if (dc->ctx->dce_version < DCN_VERSION_2_0)
				color_space_to_black_color(
					dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
		}
		/* Modes below are only meaningful on DCN2.0+. */
		if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
				get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
				get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
				get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_VABC)
				get_vabc_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_BOOSTED_REFRESH_RATE)
				get_refresh_rate_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		}
	}
}

/* Report the visual-confirm color currently associated with @stream_state
 * into @color. For PSR/FAMS modes the color is queried from DMUB firmware;
 * for other modes the color of the pipe carrying the topmost visible plane
 * is returned. Returns silently if no status/pipe/plane can be resolved.
 */
void dc_get_visual_confirm_for_stream(
	struct dc *dc,
	struct dc_stream_state *stream_state,
	struct tg_color *color)
{
	struct dc_stream_status *stream_status = dc_stream_get_status(stream_state);
	struct pipe_ctx *pipe_ctx;
	int i;
	struct dc_plane_state *plane_state = NULL;

	if (!stream_status)
		return;

	switch (dc->debug.visual_confirm) {
	case VISUAL_CONFIRM_DISABLE:
		return;
	case VISUAL_CONFIRM_PSR:
	case VISUAL_CONFIRM_FAMS:
		pipe_ctx = dc_stream_get_pipe_ctx(stream_state);
		if (!pipe_ctx)
			return;
		dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx);
		memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color));
		return;

	default:
		/* find plane with highest layer_index */
		for (i = 0; i < stream_status->plane_count; i++) {
			if (stream_status->plane_states[i]->visible)
				plane_state = stream_status->plane_states[i];
		}
		if (!plane_state)
			return;
		/* find pipe that contains plane with highest layer index */
		for (i = 0; i < MAX_PIPES; i++) {
			struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (pipe->plane_state == plane_state) {
				memcpy(color, &pipe->visual_confirm_color, sizeof(struct tg_color));
				return;
			}
		}
	}
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max
of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static
 *
 * Return: %true if the pipe context is found and adjusted;
 * %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	/*
	 * Don't adjust DRR while there's bandwidth optimizations pending to
	 * avoid conflicting with firmware updates.
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX) {
		if (dc->optimized_required &&
				(stream->adjust.v_total_max != adjust->v_total_max ||
				stream->adjust.v_total_min != adjust->v_total_min)) {
			/* Defer: flag the adjust as pending so it is retried later. */
			stream->adjust.timing_adjust_pending = true;
			return false;
		}
	}

	dc_exit_ips_for_hw_access(dc);

	/* Cache the requested adjust on the stream before programming HW. */
	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;
	stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;

	/* Totals beyond caps.max_v_total are only reachable via the
	 * long-vtotal path, and only when OTG v-count halt is allowed.
	 */
	if (dc->caps.max_v_total != 0 &&
		(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
		stream->adjust.timing_adjust_pending = false;
		if (adjust->allow_otg_v_count_halt)
			return set_long_vtotal(dc, stream, adjust);
		else
			return false;
	}

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);
			stream->adjust.timing_adjust_pending = false;

			if (dc->debug.visual_confirm == VISUAL_CONFIRM_BOOSTED_REFRESH_RATE) {
				if (pipe->stream && pipe->plane_state) {
					dc_update_visual_confirm_color(dc,
						dc->current_state, pipe);
					dc->hwss.update_visual_confirm_color(dc, pipe, pipe->plane_res.hubp->mpcc_id);

				}
			}

			if (dc->hwss.notify_cursor_offload_drr_update)
				dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream);

			return true;
		}
	}

	return false;
}

/* Disable planes/streams that exist in the current hardware state but are
 * no longer present in the incoming @context ("dangling"), so the hardware
 * matches before the new context is applied. Works on a copy of the current
 * state (dangling_context) and swaps it in as dc->current_state at the end.
 */
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_state_create_current_copy(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		/* Detect a change in pipe-split topology for this pipe index. */
		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		/* Stream still present in the new context? Then keep it... */
		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		/* ...unless the split topology changed along with stream count. */
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable &&
old_stream) {
			bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for different use.
			 */
			if (is_phantom) {
				if (tg->funcs->enable_crtc) {
					if (dc->hwseq->funcs.blank_pixel_data)
						dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
					tg->funcs->enable_crtc(tg);
				}
			}

			if (is_phantom)
				dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
			else
				dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (pipe->stream && pipe->plane_state) {
				if (!dc->debug.using_dml2)
					set_p_state_switch_method(dc, context, pipe);
				dc_update_visual_confirm_color(dc, context, pipe);
			}

			/* Legacy (DCE-style) front-end programming path. */
			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}

			if (dc->res_pool->funcs->prepare_mcache_programming)
				dc->res_pool->funcs->prepare_mcache_programming(dc, dangling_context);
			/* DCN-style front-end programming path. */
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			/* We need to put the phantom OTG back into it's default (disabled) state or we
			 * can get corruption when transition from one SubVP config to a different one.
			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
			 * will still get it's double buffer update.
			 */
			if (is_phantom) {
				if (tg->funcs->disable_phantom_crtc)
					tg->funcs->disable_phantom_crtc(tg);
			}
		}
	}

	/* Swap in the pruned context as current and release the old one. */
	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_state_release(current_ctx);
}

/* If VBIOS/GOP left a non-seamless-boot eDP stream enabled at a pixel clock
 * that differs from what the incoming @context requests, turn the stream off
 * so the driver can reprogram it cleanly.
 */
static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if timing_changed, disable stream*/
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		if (stream->apply_seamless_boot_optimization)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz = 0;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				/* Map the enabled DIG front end to its source OTG. */
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					/* VBIOS clock differs from the requested one:
					 * shut the stream down; dpms_off is reset so the
					 * driver re-enables it when applying the context.
					 */
					if (pix_clk_100hz != requested_pix_clk_100hz) {
						dc->link_srv->set_dpms_off(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

/* Public functions */

/* Allocate and construct a dc instance from @init_params. One dc exists per
 * driver instance; it is freed with dc_destroy(). Returns NULL on failure.
 * Virtual-HW environments only construct the context; real HW constructs
 * the full resource pool and derives the capability fields from it.
 */
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc_obj(*dc);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		dc->caps.linear_pitch_alignment = 64;
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		/* The underlay pipe (if any) cannot carry an independent stream. */
		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}

static void
detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	unsigned int i, edp_num;

	/* Record on each eDP link whether a sink is actually attached, unless
	 * the config forces eDP to be treated as not connected.
	 */
	dc_get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_connection_type(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

/* Bring up display hardware: detect eDP, run the hw sequencer init (skipped
 * for virtual environments), and tell DMUB firmware we are in D0.
 */
void dc_hardware_init(struct dc *dc)
{

	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
	dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
}

/* Install OS callback hooks (currently only the PSP content-protection hook). */
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
	dc->ctx->cp_psp = init_params->cp_psp;
}

/* Clear the callback hooks installed by dc_init_callbacks(). */
void dc_deinit_callbacks(struct dc *dc)
{
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
}

/* Destruct and free a dc created by dc_create(); NULLs the caller's pointer. */
void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

/* Arm per-frame CRTC position reset for every pipe whose stream is slaved
 * to another stream's trigger event (multi-display synchronization). The
 * event-source stream itself is excluded.
 */
static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count,
multisync_pipes);
	}
}

/* Group OTG-master pipes whose stream timings (or vblanks) can be
 * synchronized and program each group, choosing the first unblanked pipe
 * as the group master.
 */
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	/* Only top-level, first-ODM pipes participate. */
	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		/* Publish group id/size/master into each stream's status. */
		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);

			if (!status)
				continue;

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;

		}

		/* remove any other unblanked pipes as they have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			/* remove any other pipes by checking valid plane
			 * NOTE: j deliberately continues from the master-search
			 * loop above (starts past the chosen master).
			 */
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, ctx, group_index, group_size, pipe_set);
			} else
			if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}

/* Return true if @streams differs from dc->current_state's stream set, or
 * any of the given streams has an invalid link state.
 */
static bool streams_changed(struct dc *dc,
		struct dc_stream_state *streams[],
		uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != streams[i])
			return true;
		if (!streams[i]->link->link_state_valid)
			return true;
	}

	return false;
}

/* Validate that the timing VBIOS/GOP programmed into hardware matches
 * @crtc_timing so the boot mode can be taken over seamlessly (eDP only).
 * Returns false (with a debug log) on the first mismatch found.
 */
bool dc_validate_boot_timing(const struct dc *dc,
		const struct dc_sink *sink,
		struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	if (dc->debug.force_odm_combine) {
		DC_LOG_DEBUG("boot timing validation failed due to force_odm_combine\n");
		return false;
	}

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
		DC_LOG_DEBUG("boot timing validation failed due to disabled DIG\n");
		return false;
	}

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN) {
		DC_LOG_DEBUG("boot timing validation failed due to unknown DIG engine ID\n");
		return false;
	}

	/* Resolve the stream encoder and its source OTG instance. */
	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count) {
		DC_LOG_DEBUG("boot timing validation failed due to timing generator instance not found\n");
		return false;
	}

	if (tg_inst >= dc->res_pool->timing_generator_count) {
		DC_LOG_DEBUG("boot timing validation failed due to invalid timing generator count\n");
		return false;
	}

	if (tg_inst != link->link_enc->preferred_engine) {
		DC_LOG_DEBUG("boot timing validation failed due to non-preferred timing generator\n");
		return false;
	}

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing) {
		DC_LOG_DEBUG("boot timing validation failed due to missing get_hw_timing callback\n");
		return false;
	}

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) {
		DC_LOG_DEBUG("boot timing validation failed due to failed get_hw_timing return\n");
		return false;
	}

	/* Every field of the requested timing must match what is in HW. */
	if (crtc_timing->h_total != hw_crtc_timing.h_total) {
		DC_LOG_DEBUG("boot timing validation failed due to h_total mismatch\n");
		return false;
	}

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) {
		DC_LOG_DEBUG("boot timing validation failed due to h_border_left mismatch\n");
		return false;
	}

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) {
		DC_LOG_DEBUG("boot timing validation failed due to h_addressable mismatch\n");
		return false;
	}

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) {
		DC_LOG_DEBUG("boot timing validation failed due to h_border_right mismatch\n");
		return false;
	}

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) {
		DC_LOG_DEBUG("boot timing validation failed due to h_front_porch mismatch\n");
		return false;
	}

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) {
		DC_LOG_DEBUG("boot timing validation failed due to h_sync_width mismatch\n");
		return false;
	}

	if (crtc_timing->v_total != hw_crtc_timing.v_total) {
		DC_LOG_DEBUG("boot timing validation failed due to v_total mismatch\n");
		return false;
	}

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) {
		DC_LOG_DEBUG("boot timing validation failed due to v_border_top mismatch\n");
		return false;
	}

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) {
		DC_LOG_DEBUG("boot timing validation failed due to v_addressable mismatch\n");
		return false;
	}

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) {
		DC_LOG_DEBUG("boot timing validation failed due to v_border_bottom mismatch\n");
		return false;
	}

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) {
		DC_LOG_DEBUG("boot timing validation failed due to v_front_porch mismatch\n");
		return false;
	}

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) {
		DC_LOG_DEBUG("boot timing validation failed due to v_sync_width mismatch\n");
		return false;
	}

	if (crtc_timing->flags.DSC) {
		struct display_stream_compressor *dsc = NULL;
		struct dcn_dsc_state dsc_state = {0};

		/* Find DSC associated with this timing generator */
		if (tg_inst < dc->res_pool->res_cap->num_dsc) {
			dsc = dc->res_pool->dscs[tg_inst];
		}

		if (!dsc || !dsc->funcs->dsc_read_state) {
			DC_LOG_DEBUG("boot timing validation failed due to no DSC resource or read function\n");
			return false;
		}

		/* Read current DSC hardware state */
		dsc->funcs->dsc_read_state(dsc, &dsc_state);

		/* Check if DSC is actually enabled in hardware */
		if (dsc_state.dsc_clock_en == 0) {
			DC_LOG_DEBUG("boot timing validation failed due to DSC not enabled in hardware\n");
			return false;
		}

		/* Derive slice counts from the HW slice dimensions (ceil-div). */
		uint32_t num_slices_h = 0;
		uint32_t num_slices_v = 0;

		if (dsc_state.dsc_slice_width > 0) {
			num_slices_h = (crtc_timing->h_addressable + dsc_state.dsc_slice_width - 1) / dsc_state.dsc_slice_width;
		}

		if (dsc_state.dsc_slice_height > 0) {
			num_slices_v = (crtc_timing->v_addressable + dsc_state.dsc_slice_height - 1) / dsc_state.dsc_slice_height;
		}

		if (crtc_timing->dsc_cfg.num_slices_h != num_slices_h) {
			DC_LOG_DEBUG("boot timing validation failed due to num_slices_h mismatch\n");
			return false;
		}

		if (crtc_timing->dsc_cfg.num_slices_v != num_slices_v) {
			DC_LOG_DEBUG("boot timing validation failed due to num_slices_v mismatch\n");
			return false;
		}

		if (crtc_timing->dsc_cfg.bits_per_pixel != dsc_state.dsc_bits_per_pixel) {
			DC_LOG_DEBUG("boot timing validation failed due to bits_per_pixel mismatch\n");
			return false;
		}

		if (crtc_timing->dsc_cfg.block_pred_enable != dsc_state.dsc_block_pred_enable) {
			DC_LOG_DEBUG("boot timing validation failed due to block_pred_enable mismatch\n");
			return false;
		}

		if (crtc_timing->dsc_cfg.linebuf_depth != dsc_state.dsc_line_buf_depth) {
			DC_LOG_DEBUG("boot timing validation failed due to linebuf_depth mismatch\n");
			return false;
		}

		if (crtc_timing->dsc_cfg.version_minor != dsc_state.dsc_version_minor) {
			DC_LOG_DEBUG("boot timing validation failed due to version_minor mismatch\n");
			return false;
		}

		if (crtc_timing->dsc_cfg.ycbcr422_simple != dsc_state.dsc_simple_422) {
			DC_LOG_DEBUG("boot timing validation failed due to pixel encoding mismatch\n");
			return false;
		}

		// Skip checks for is_frl, is_dp, and rc_buffer_size which are not programmed by vbios
		// or not necessary for seamless boot validation.
	}

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz = 0;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		/* Scale HW clock back up by ODM factor / pixels-per-cycle before
		 * comparing against the stream's requested pixel clock.
		 */
		if (numOdmPipes == 2) {
			pix_clk_100hz *= 2;
		} else if (numOdmPipes == 4) {
			pix_clk_100hz *= 4;
		} else if (se && se->funcs->get_pixels_per_cycle) {
			uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se);

			if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy) {
				DC_LOG_DEBUG("boot timing validation failed due to pixels_per_cycle\n");
				return false;
			}

			pix_clk_100hz *= pixels_per_cycle;
		}

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz) {
			DC_LOG_DEBUG("boot timing validation failed due to pix_clk_100hz mismatch\n");
			return false;
		}

		if (!se || !se->funcs->dp_get_pixel_format) {
			DC_LOG_DEBUG("boot timing validation failed due to missing dp_get_pixel_format\n");
			return false;
		}

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth)) {
			DC_LOG_DEBUG("boot timing validation failed due to dp_get_pixel_format failure\n");
			return false;
		}

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) {
			DC_LOG_DEBUG("boot timing validation failed due to display_color_depth mismatch\n");
			return false;
		}

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) {
			DC_LOG_DEBUG("boot timing validation failed due to pixel_encoding mismatch\n");
			return false;
		}
	}


	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		DC_LOG_DEBUG("boot timing validation failed due to VSC SDP colorimetry\n");
		return false;
	}

	if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
		DC_LOG_DEBUG("boot timing validation failed due to DP 128b/132b\n");
		return false;
	}

	if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}

/* True if @pipe_ctx carries @stream. */
static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

/* True if @pipe_ctx carries @plane_state. */
static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
return (pipe_ctx->plane_state == plane_state); 2091 } 2092 2093 void dc_enable_stereo( 2094 struct dc *dc, 2095 struct dc_state *context, 2096 struct dc_stream_state *streams[], 2097 uint8_t stream_count) 2098 { 2099 int i, j; 2100 struct pipe_ctx *pipe; 2101 2102 dc_exit_ips_for_hw_access(dc); 2103 2104 for (i = 0; i < MAX_PIPES; i++) { 2105 if (context != NULL) { 2106 pipe = &context->res_ctx.pipe_ctx[i]; 2107 } else { 2108 context = dc->current_state; 2109 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2110 } 2111 2112 for (j = 0; pipe && j < stream_count; j++) { 2113 if (should_update_pipe_for_stream(context, pipe, streams[j]) && 2114 dc->hwss.setup_stereo) 2115 dc->hwss.setup_stereo(pipe, dc); 2116 } 2117 } 2118 } 2119 2120 void dc_trigger_sync(struct dc *dc, struct dc_state *context) 2121 { 2122 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { 2123 dc_exit_ips_for_hw_access(dc); 2124 2125 enable_timing_multisync(dc, context); 2126 program_timing_sync(dc, context); 2127 } 2128 } 2129 2130 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) 2131 { 2132 int i; 2133 unsigned int stream_mask = 0; 2134 2135 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2136 if (context->res_ctx.pipe_ctx[i].stream) 2137 stream_mask |= 1 << i; 2138 } 2139 2140 return stream_mask; 2141 } 2142 2143 void dc_z10_restore(const struct dc *dc) 2144 { 2145 if (dc->hwss.z10_restore) 2146 dc->hwss.z10_restore(dc); 2147 } 2148 2149 void dc_z10_save_init(struct dc *dc) 2150 { 2151 if (dc->hwss.z10_save_init) 2152 dc->hwss.z10_save_init(dc); 2153 } 2154 2155 /* Set a pipe unlock order based on the change in DET allocation and stores it in dc scratch memory 2156 * Prevents over allocation of DET during unlock process 2157 * e.g. 
2 pipe config with different streams with a max of 20 DET segments 2158 * Before: After: 2159 * - Pipe0: 10 DET segments - Pipe0: 12 DET segments 2160 * - Pipe1: 10 DET segments - Pipe1: 8 DET segments 2161 * If Pipe0 gets updated first, 22 DET segments will be allocated 2162 */ 2163 static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context) 2164 { 2165 unsigned int i = 0; 2166 struct pipe_ctx *pipe = NULL; 2167 struct timing_generator *tg = NULL; 2168 2169 if (!dc->config.set_pipe_unlock_order) 2170 return; 2171 2172 memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first)); 2173 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2174 pipe = &context->res_ctx.pipe_ctx[i]; 2175 tg = pipe->stream_res.tg; 2176 2177 if (!resource_is_pipe_type(pipe, OTG_MASTER) || 2178 !tg->funcs->is_tg_enabled(tg) || 2179 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 2180 continue; 2181 } 2182 2183 if (resource_calculate_det_for_stream(context, pipe) < 2184 resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) { 2185 dc->scratch.pipes_to_unlock_first[i] = true; 2186 } 2187 } 2188 } 2189 2190 /** 2191 * dc_commit_state_no_check - Apply context to the hardware 2192 * 2193 * @dc: DC object with the current status to be updated 2194 * @context: New state that will become the current status at the end of this function 2195 * 2196 * Applies given context to the hardware and copy it into current context. 2197 * It's up to the user to release the src context afterwards. 
 *
 * Return: an enum dc_status result code for the operation
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;
	bool subvp_prev_use = false;

	/* Bring HW out of Z10 and disable idle optimizations before programming. */
	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* Check old context for SubVP */
		subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
		if (subvp_prev_use)
			break;
	}

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	} else if (get_seamless_boot_stream_count(dc->current_state) > 0) {
		/* If the previous Stream still retains the apply seamless boot flag,
		 * it means the OS has not actually performed a flip yet.
		 * At this point, if we receive dc_commit_streams again, we should
		 * once more check whether the actual HW timing matches what the OS
		 * has provided
		 */
		disable_vbios_mode_if_required(dc, context);
	}

	if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			//Only delay otg master for a given config
			if (resource_is_pipe_type(pipe, OTG_MASTER)) {
				//dc_commit_state_no_check is always a full update
				dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false);
				break;
			}
		}
	}

	/* Raise clocks/bandwidth up-front, except while seamless-boot streams
	 * still cover every stream (their BW was set up by VBIOS).
	 */
	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* When SubVP is active, all HW programming must be done while
	 * SubVP lock is acquired
	 */
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
	if (dc->hwss.dmub_hw_control_lock)
		dc->hwss.dmub_hw_control_lock(dc, context, true);

	if (dc->hwss.update_dsc_pg)
		dc->hwss.update_dsc_pg(dc, context, false);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	/* Pause cursor offload on the outgoing state, re-enable it on the new
	 * state once the context has been applied to HW.
	 */
	for (i = 0; i < dc->current_state->stream_count; i++)
		dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false);

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true);

	if (result != DC_OK) {
		/* Application of dc_state to hardware stopped. */
		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
		return result;
	}

	dc_trigger_sync(dc, context);

	/* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
	for (i = 0; i < context->stream_count; i++) {
		uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;

		context->streams[i]->update_flags.raw = 0xFFFFFFFF;
		context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
	}

	determine_pipe_unlock_order(dc, context);
	/* Program all planes within new context*/
	if (dc->res_pool->funcs->prepare_mcache_programming)
		dc->res_pool->funcs->prepare_mcache_programming(dc, context);
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);

		if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) {
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				pipe = &context->res_ctx.pipe_ctx[i];
				dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe);
			}
		}

		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}

	if (dc->hwss.commit_subvp_config)
		dc->hwss.commit_subvp_config(dc, context);
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
	if (dc->hwss.dmub_hw_control_lock)
		dc->hwss.dmub_hw_control_lock(dc, context, false);

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		/* Streams whose mode did not change were already re-programmed above. */
		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (get_seamless_boot_stream_count(context) == 0 ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		hwss_wait_for_no_pipes_pending(dc, context);
		/*
		 * optimized dispclk depends on ODM setup. Need to wait for ODM
		 * update pending complete before optimizing bandwidth.
		 */
		hwss_wait_for_odm_update_pending_complete(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
		/* Need to do otg sync again as otg could be out of sync due to otg
		 * workaround applied during clock update
		 */
		dc_trigger_sync(dc, context);
	}

	if (dc->hwss.update_dsc_pg)
		dc->hwss.update_dsc_pg(dc, context, true);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	/* Notify DMUB only when the set of active pipes actually changed. */
	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	/* Clear update flags that were set earlier to avoid redundant programming */
	for (i = 0; i < context->stream_count; i++) {
		context->streams[i]->update_flags.raw = 0x0;
	}

	/* Swap in the new state; caller still holds its own reference on @context. */
	old_state = dc->current_state;
	dc->current_state = context;

	dc_state_release(old_state);

	dc_state_retain(dc->current_state);

	return result;
}

static bool commit_minimal_transition_state(struct dc *dc,
		struct dc_state *transition_base_context);

/**
 * dc_commit_streams - Commit current stream state
 *
 * @dc: DC object with the commit state to be configured in the hardware
 * @params: Parameters for the commit, including the streams to be committed
 *
 * Function responsible for commit streams change to the hardware.
 *
 * Return:
 * Return DC_OK if everything work as expected, otherwise, return a dc_status
 * code.
 */
enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params)
{
	int i, j;
	struct dc_state *context;
	enum dc_status res = DC_OK;
	struct dc_validation_set set[MAX_STREAMS] = {0};
	struct pipe_ctx *pipe;
	bool handle_exit_odm2to1 = false;

	if (!params)
		return DC_ERROR_UNEXPECTED;

	if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
		return res;

	/* Nothing to do when neither the streams nor the power source changed. */
	if (!streams_changed(dc, params->streams, params->stream_count) &&
			dc->current_state->power_source == params->power_source)
		return res;

	dc_exit_ips_for_hw_access(dc);

	DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count);

	for (i = 0; i < params->stream_count; i++) {
		struct dc_stream_state *stream = params->streams[i];
		struct dc_stream_status *status = dc_stream_get_status(stream);
		struct dc_sink *sink = stream->sink;

		/* revalidate streams */
		if (!dc_is_virtual_signal(sink->sink_signal)) {
			res = dc_validate_stream(dc, stream);
			if (res != DC_OK)
				return res;
		}


		dc_stream_log(dc, stream);

		set[i].stream = stream;

		if (status) {
			set[i].plane_count = status->plane_count;
			for (j = 0; j < status->plane_count; j++)
				set[i].plane_states[j] = status->plane_states[j];
		}
	}

	/* ODM Combine 2:1 power optimization is only applied for single stream
	 * scenario, it uses extra pipes than needed to reduce power consumption
	 * We need to switch off this feature to make room for new streams.
	 */
	if (params->stream_count > dc->current_state->stream_count &&
			dc->current_state->stream_count == 1) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->next_odm_pipe)
				handle_exit_odm2to1 = true;
		}
	}

	/* NOTE(review): commit_minimal_transition_state is forward-declared as
	 * returning bool while res is enum dc_status; this relies on the bool
	 * converting to a valid status value — confirm against the definition.
	 */
	if (handle_exit_odm2to1)
		res = commit_minimal_transition_state(dc, dc->current_state);

	context = dc_state_create_current_copy(dc);
	if (!context)
		goto context_alloc_fail;

	context->power_source = params->power_source;

	res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING);

	/*
	 * Only update link encoder to stream assignment after bandwidth validation passed.
	 */
	if (res == DC_OK && dc->res_pool->funcs->link_encs_assign && !dc->config.unify_link_enc_assignment)
		dc->res_pool->funcs->link_encs_assign(
			dc, context, context->streams, context->stream_count);

	if (res != DC_OK) {
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	/*
	 * If not already seamless, make transition seamless by inserting intermediate minimal transition
	 */
	if (dc->hwss.is_pipe_topology_transition_seamless &&
			!dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) {
		res = commit_minimal_transition_state(dc, context);
		if (res != DC_OK) {
			BREAK_TO_DEBUGGER();
			goto fail;
		}
	}

	res = dc_commit_state_no_check(dc, context);

	/* Report back per-stream results (OTG instance, ABM support). */
	for (i = 0; i < params->stream_count; i++) {
		for (j = 0; j < context->stream_count; j++) {
			if (params->streams[i]->stream_id == context->streams[j]->stream_id)
				params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;

			if (dc_is_embedded_signal(params->streams[i]->signal)) {
				struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);

				if (!status)
					continue;

				if (dc->hwss.is_abm_supported)
					status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
				else
					status->is_abm_supported = true;
			}
		}
	}

fail:
	dc_state_release(context);

context_alloc_fail:

	DC_LOG_DC("%s Finished.\n", __func__);

	return res;
}

/* Acquire (@acquire == true) or release a post-blend MPC 3DLUT/shaper pair.
 * On acquire, the MPCC id is taken from the HUBP instance of the pipe that
 * drives @stream; on release, no pipe lookup is needed.
 */
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/*find pipe idx for the given stream*/
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true;/*for release pipe_idx is not required*/

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}

/* Returns true if any non-phantom plane in @context still has a flip pending. */
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// Don't check flip pending on phantom pipes
		if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
			continue;

		/* Must set to false to start with, due to OR in
update function */ 2619 pipe->plane_state->status.is_flip_pending = false; 2620 dc->hwss.update_pending_status(pipe); 2621 if (pipe->plane_state->status.is_flip_pending) 2622 return true; 2623 } 2624 return false; 2625 } 2626 2627 /* Perform updates here which need to be deferred until next vupdate 2628 * 2629 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered 2630 * but forcing lut memory to shutdown state is immediate. This causes 2631 * single frame corruption as lut gets disabled mid-frame unless shutdown 2632 * is deferred until after entering bypass. 2633 */ 2634 static void process_deferred_updates(struct dc *dc) 2635 { 2636 int i = 0; 2637 2638 if (dc->debug.enable_mem_low_power.bits.cm) { 2639 ASSERT(dc->dcn_ip->max_num_dpp); 2640 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2641 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2642 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2643 } 2644 } 2645 2646 void dc_post_update_surfaces_to_stream(struct dc *dc) 2647 { 2648 int i; 2649 struct dc_state *context = dc->current_state; 2650 2651 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2652 return; 2653 2654 post_surface_trace(dc); 2655 2656 /* 2657 * Only relevant for DCN behavior where we can guarantee the optimization 2658 * is safe to apply - retain the legacy behavior for DCE. 
2659 */ 2660 2661 if (dc->ctx->dce_version < DCE_VERSION_MAX) 2662 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2663 else { 2664 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2665 2666 if (is_flip_pending_in_pipes(dc, context)) 2667 return; 2668 2669 for (i = 0; i < dc->res_pool->pipe_count; i++) 2670 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2671 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2672 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2673 dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]); 2674 } 2675 2676 process_deferred_updates(dc); 2677 2678 dc->hwss.optimize_bandwidth(dc, context); 2679 2680 if (dc->hwss.update_dsc_pg) 2681 dc->hwss.update_dsc_pg(dc, context, true); 2682 } 2683 2684 dc->optimized_required = false; 2685 } 2686 2687 void dc_get_default_tiling_info(const struct dc *dc, struct dc_tiling_info *tiling_info) 2688 { 2689 if (!dc || !tiling_info) 2690 return; 2691 if (dc->res_pool && dc->res_pool->funcs && dc->res_pool->funcs->get_default_tiling_info) { 2692 dc->res_pool->funcs->get_default_tiling_info(tiling_info); 2693 return; 2694 } 2695 } 2696 2697 bool dc_set_generic_gpio_for_stereo(bool enable, 2698 struct gpio_service *gpio_service) 2699 { 2700 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2701 struct gpio_pin_info pin_info; 2702 struct gpio *generic; 2703 struct gpio_generic_mux_config *config = kzalloc_obj(struct gpio_generic_mux_config); 2704 2705 if (!config) 2706 return false; 2707 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2708 2709 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2710 kfree(config); 2711 return false; 2712 } else { 2713 generic = dal_gpio_service_create_generic_mux( 2714 gpio_service, 2715 pin_info.offset, 2716 pin_info.mask); 2717 } 2718 2719 if (!generic) { 2720 kfree(config); 2721 return false; 2722 } 2723 2724 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2725 2726 config->enable_output_from_mux 
		= enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}

/* True when @plane_state is attached to any pipe in @context. */
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

/* Classify the update implied by u->plane_info against the current surface
 * state: sets the per-field surface update flags and elevates the update
 * type/lock level accordingly. Returns FAST/NONE when no plane_info is given.
 */
static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };

	if (!u->plane_info)
		return update_type;

	// `plane_info` present means at least `STREAM` lock is required
	elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
	}

	/* Rotation, pixel format and stereo format changes require full
	 * revalidation, hence FULL/GLOBAL below.
	 */
	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		/* During DCC on/off, stutter period is calculated before
		 * DCC has fully transitioned. This results in incorrect
		 * stutter period calculation. Triggering a full update will
		 * recalculate stutter period.
		 */
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
	}

	const struct dc_tiling_info *tiling = &u->plane_info->tiling_info;

	if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) {
		update_flags->bits.swizzle_change = 1;

		if (tiling->flags.avoid_full_update_on_tiling_change) {
			elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
		} else {
			/* unknown tiling impact on bandwidth: force a full recompute */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed.
	 */
	return update_type;
}

/* Classify the update implied by u->scaling_info: flags scaling/position
 * changes and elevates the update type (FULL/GLOBAL for size changes,
 * MED/STREAM for pure position changes).
 */
static struct surface_update_descriptor get_scaling_info_update_type(
		const struct dc_check_config *check_config,
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };

	if (!u->scaling_info)
		return update_type;

	// `scaling_info` present means at least `STREAM` lock is required
	elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
			|| u->scaling_info->src_rect.height != u->surface->src_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
				u->surface->scaling_quality.integer_scaling) {
		update_flags->bits.scaling_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);

		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
				|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;

		if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width &&
				(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
				 u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
			/* Changing clip size of a large surface may result in MPC slice count change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y) {
		elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
		update_flags->bits.position_change = 1;
	}

	return update_type;
}

/* Combine plane-info, scaling-info and per-property checks into the overall
 * update descriptor (type + lock level) for a single surface update.
 * force_full_update short-circuits to FULL/GLOBAL with all flags set.
 */
static struct surface_update_descriptor det_surface_update(
		const struct dc_check_config *check_config,
		struct dc_surface_update *u)
{
	struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (u->surface->force_full_update) {
		update_flags->raw = 0xFFFFFFFF;
		elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
		return overall_type;
	}

	update_flags->raw = 0; // Reset all flags

	struct surface_update_descriptor inner_type = get_plane_info_update_type(u);

	elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);

	inner_type = get_scaling_info_update_type(check_config, u);
	elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);

	if (u->flip_addr) {
		update_flags->bits.addr_update = 1;
		elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);

		/* TMZ transition changes memory protection; needs full revalidation. */
		if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
			update_flags->bits.tmz_changed = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
		}
	}
	if (u->in_transfer_func) {
		update_flags->bits.in_transfer_func_change = 1;
		elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->input_csc_color_matrix) {
		update_flags->bits.input_csc_change = 1;
		elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->coeff_reduction_factor) {
		update_flags->bits.coeff_reduction_change = 1;
		elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->gamut_remap_matrix) {
		update_flags->bits.gamut_remap_change = 1;
		elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->cm || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
		update_flags->bits.gamma_change = 1;
		elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->cm && (u->cm->flags.bits.lut3d_enable || u->surface->cm.flags.bits.lut3d_enable)) {
		update_flags->bits.lut_3d = 1;
		elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->cm && u->cm->flags.bits.lut3d_dma_enable != u->surface->cm.flags.bits.lut3d_dma_enable &&
			u->cm->flags.bits.lut3d_enable && u->surface->cm.flags.bits.lut3d_enable) {
		/* Toggling 3DLUT loading between DMA and Host is illegal */
		BREAK_TO_DEBUGGER();
	}

	if (u->cm && u->cm->flags.bits.lut3d_enable && !u->cm->flags.bits.lut3d_dma_enable) {
		/* Host loading 3DLUT requires full update but only stream lock */
		elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STREAM);
	}

	if (u->hdr_mult.value)
		if
(u->hdr_mult.value != u->surface->hdr_mult.value) { 2982 // TODO: Should be fast? 2983 update_flags->bits.hdr_mult = 1; 2984 elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2985 } 2986 2987 if (u->sdr_white_level_nits) 2988 if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) { 2989 // TODO: Should be fast? 2990 update_flags->bits.sdr_white_level_nits = 1; 2991 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 2992 } 2993 2994 if (u->cm_hist_control) { 2995 update_flags->bits.cm_hist_change = 1; 2996 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2997 } 2998 if (check_config->enable_legacy_fast_update && 2999 (update_flags->bits.gamma_change || 3000 update_flags->bits.gamut_remap_change || 3001 update_flags->bits.input_csc_change || 3002 update_flags->bits.cm_hist_change || 3003 update_flags->bits.coeff_reduction_change)) { 3004 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 3005 } 3006 return overall_type; 3007 } 3008 3009 /* May need to flip the desktop plane in cases where MPO plane receives a flip but desktop plane doesn't 3010 * while both planes are flip_immediate 3011 */ 3012 static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count) 3013 { 3014 (void)dc; 3015 bool has_flip_immediate_plane = false; 3016 int i; 3017 3018 for (i = 0; i < surface_count; i++) { 3019 if (updates[i].surface->flip_immediate) { 3020 has_flip_immediate_plane = true; 3021 break; 3022 } 3023 } 3024 3025 if (has_flip_immediate_plane && surface_count > 1) { 3026 for (i = 0; i < surface_count; i++) { 3027 if (updates[i].surface->flip_immediate) 3028 updates[i].surface->update_flags.bits.addr_update = 1; 3029 } 3030 } 3031 } 3032 3033 static struct surface_update_descriptor check_update_surfaces_for_stream( 3034 const struct dc_check_config *check_config, 3035 struct dc_surface_update *updates, 3036 int 
		surface_count,
		struct dc_stream_update *stream_update)
{
	struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };

	/* When countdown finishes, promote this flip to full to trigger deferred final transition */
	if (check_config->deferred_transition_state && !check_config->transition_countdown_to_steady_state) {
		elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
	}

	if (stream_update && stream_update->pending_test_pattern) {
		elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
	}

	if (stream_update && stream_update->hw_cursor_req) {
		elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
	}

	/* some stream updates require passive update */
	if (stream_update) {
		elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);

		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
				(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
				stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off) {
			su_flags->bits.dpms_off = 1;
			/* DPMS changes also touch the link, so both locks are needed. */
			elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL | LOCK_DESCRIPTOR_LINK);
		}

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (stream_update->mst_bw_update)
			su_flags->bits.mst_bw = 1;

		if (stream_update->stream->freesync_on_desktop &&
				(stream_update->vrr_infopacket || stream_update->allow_freesync ||
				stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
			su_flags->bits.fams_changed = 1;

		if (stream_update->scaler_sharpener_update)
			su_flags->bits.scaler_sharpener = 1;

		if (stream_update->sharpening_required)
			su_flags->bits.sharpening_required = 1;

		if (stream_update->output_color_space)
			su_flags->bits.out_csc = 1;

		// TODO: Make each elevation explicit, as to not override fast stream in crct_timing_adjust
		/* Any flag set above currently forces a full, globally locked update. */
		if (su_flags->raw)
			elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);

		// Non-global cases
		if (stream_update->output_csc_transform) {
			su_flags->bits.out_csc = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
		}

		if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func) {
			su_flags->bits.out_tf = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
		}
	}

	/* Fold in the per-surface classification of every update in the batch. */
	for (int i = 0 ; i < surface_count; i++) {
		struct surface_update_descriptor inner_type =
				det_surface_update(check_config, &updates[i]);

		elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
	}

	return overall_type;
}

/*
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */
struct surface_update_descriptor dc_check_update_surfaces_for_stream(
		const struct dc_check_config *check_config,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update)
{
	/* Clear all stream and surface flags before reclassifying. */
	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (int i = 0; i <
surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}

/* stream_get_status() - Find the status entry for @stream in @ctx, or NULL
 * if the stream is not part of that state.
 */
static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

/* Minimum update type at which update_surface_trace() is emitted. */
static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

/* copy_surface_update_to_plane() - Apply every populated field of
 * @srf_update onto @surface. Only fields present in the update are copied;
 * all others keep their current value.
 */
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		/* Record flip-to-flip elapsed time in a circular buffer of
		 * DC_PLANE_UPDATE_TIMES_MAX entries.
		 */
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
			surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	if (srf_update->gamma) {
		memcpy(&surface->gamma_correction.entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction.is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction.num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction.type =
			srf_update->gamma->type;
	}
	if (srf_update->cm_hist_control) {
		memcpy(&surface->cm_hist_control,
			srf_update->cm_hist_control,
			sizeof(surface->cm_hist_control));
	}

	if (srf_update->in_transfer_func) {
		surface->in_transfer_func.sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func.tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func.type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func.tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	/* Shaper, 3DLUT, 1DLUT */
	if (srf_update->cm) {
		memcpy(&surface->cm, srf_update->cm,
			sizeof(surface->cm));
	}

	if (srf_update->hdr_mult.value)
		surface->hdr_mult =
				srf_update->hdr_mult;

	if (srf_update->sdr_white_level_nits)
		surface->sdr_white_level_nits =
			srf_update->sdr_white_level_nits;

	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;

	if (srf_update->gamut_remap_matrix)
		surface->gamut_remap_matrix =
			*srf_update->gamut_remap_matrix;

	if (srf_update->cursor_csc_color_matrix)
		surface->cursor_csc_color_matrix =
			*srf_update->cursor_csc_color_matrix;

	if (srf_update->bias_and_scale.bias_and_scale_valid)
		surface->bias_and_scale =
				srf_update->bias_and_scale;
}

/* copy_stream_update_to_stream() - Apply every populated field of @update
 * onto @stream. Only non-NULL / non-zero update fields are copied. The DSC
 * config is additionally validated against a temporary context copy and the
 * update is dropped (set to NULL) if validation fails.
 */
static void copy_stream_update_to_stream(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
		struct dc_stream_update *update)
{
	(void)context; /* unused; kept for signature stability */
	struct dc_context *dc_ctx = dc->ctx;

	if (update == NULL || stream == NULL)
		return;

	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	if (update->out_transfer_func) {
		stream->out_transfer_func.sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func.tf = update->out_transfer_func->tf;
		stream->out_transfer_func.type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func.tf_pts,
			&update->out_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt)
		stream->periodic_interrupt = *update->periodic_interrupt;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->hw_cursor_req)
		stream->hw_cursor_req = *update->hw_cursor_req;

	if (update->allow_freesync)
		stream->allow_freesync = *update->allow_freesync;

	if (update->vrr_active_variable)
		stream->vrr_active_variable = *update->vrr_active_variable;

	if (update->vrr_active_fixed)
		stream->vrr_active_fixed = *update->vrr_active_fixed;

	if (update->crtc_timing_adjust) {
		/* Flag a pending adjust if v_total limits change or one is
		 * already pending, then clear the caller-visible flag once the
		 * adjust has been captured into the stream.
		 */
		if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
			stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
			stream->adjust.timing_adjust_pending)
			update->crtc_timing_adjust->timing_adjust_pending = true;
		stream->adjust = *update->crtc_timing_adjust;
		update->crtc_timing_adjust->timing_adjust_pending = false;
	}

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->hfvsif_infopacket)
		stream->hfvsif_infopacket = *update->hfvsif_infopacket;

	if (update->vtem_infopacket)
		stream->vtem_infopacket = *update->vtem_infopacket;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->adaptive_sync_infopacket)
		stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;

	if (update->avi_infopacket)
		stream->avi_infopacket = *update->avi_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;

	if (update->pending_test_pattern)
		stream->test_pattern = *update->pending_test_pattern;
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		/* Use temporary context for validating new DSC config */
		struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);

		if (dsc_validate_context) {
			stream->timing.dsc_cfg = *update->dsc_config;
			stream->timing.flags.DSC = enable_dsc;
			if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context,
					DC_VALIDATE_MODE_ONLY) != DC_OK) {
				/* Validation failed: roll back and drop the DSC update. */
				stream->timing.dsc_cfg = old_dsc_cfg;
				stream->timing.flags.DSC = old_dsc_enabled;
				update->dsc_config = NULL;
			}

			dc_state_release(dsc_validate_context);
		} else {
			DC_ERROR("Failed to allocate new validate context for DSC change\n");
			update->dsc_config = NULL;
		}
	}
	if (update->scaler_sharpener_update)
		stream->scaler_sharpener_update = *update->scaler_sharpener_update;
	if (update->sharpening_required)
		stream->sharpening_required = *update->sharpening_required;

	if (update->drr_trigger_mode) {
		stream->drr_trigger_mode = *update->drr_trigger_mode;
	}
}

/* backup_planes_and_stream_state() - Snapshot the stream and all of its
 * committed plane configs into @scratch so they can be restored later.
 */
static void backup_planes_and_stream_state(
		struct dc_scratch_space *scratch,
		struct dc_stream_state *stream)
{
	int i;
	struct dc_stream_status *status =
dc_stream_get_status(stream);

	if (!status)
		return;

	for (i = 0; i < status->plane_count; i++) {
		dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]);
	}
	scratch->stream_state = *stream;
}

/* restore_planes_and_stream_state() - Inverse of
 * backup_planes_and_stream_state(): copy the scratch snapshot back onto the
 * live stream and its planes, preserving the stream's refcount.
 */
static void restore_planes_and_stream_state(
		struct dc_scratch_space *scratch,
		struct dc_stream_state *stream)
{
	int i;
	struct dc_stream_status *status = dc_stream_get_status(stream);

	if (!status)
		return;

	for (i = 0; i < status->plane_count; i++) {
		dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
	}

	// refcount is persistent
	struct kref temp_refcount = stream->refcount;
	*stream = scratch->stream_state;
	stream->refcount = temp_refcount;
}

/**
 * update_seamless_boot_flags() - Helper function for updating seamless boot flags
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @surface_count: Number of surfaces that have an update
 * @stream: Corresponding stream to be updated in the current flip
 *
 * Updating seamless boot flags does not need to be part of the commit sequence. This
 * helper function will update the seamless boot flags on each flip (if required)
 * outside of the HW commit sequence (fast or slow).
 *
 * Return: void
 */
static void update_seamless_boot_flags(struct dc *dc,
		struct dc_state *context,
		int surface_count,
		struct dc_stream_state *stream)
{
	if (get_seamless_boot_stream_count(context) > 0 && (surface_count > 0 || stream->dpms_off)) {
		/* Optimize seamless boot flag keeps clocks and watermarks high until
		 * first flip. After first flip, optimization is required to lower
		 * bandwidth. Important to note that it is expected UEFI will
		 * only light up a single display on POST, therefore we only expect
		 * one stream with seamless boot flag set.
		 */
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;

			if (get_seamless_boot_stream_count(context) == 0)
				dc->optimized_required = true;
		}
	}
}

/* Defined later in this file; declared here for use by
 * update_planes_and_stream_state() below.
 */
static bool full_update_required_weak(
		const struct dc *dc,
		const struct dc_surface_update *srf_updates,
		int surface_count,
		const struct dc_stream_update *stream_update,
		const struct dc_stream_state *stream);

/* Saved debug/policy knobs so a minimal-transition validation can be undone. */
struct pipe_split_policy_backup {
	bool dynamic_odm_policy;
	bool subvp_policy;
	enum pipe_split_policy mpc_policy;
	char force_odm[MAX_PIPES];
};

/* backup_and_set_minimal_pipe_split_policy() - Save the current pipe-split,
 * ODM and SubVP policies into @policy, then force the most conservative
 * (minimal split) settings for transition validation.
 */
static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
		struct dc_state *context,
		struct pipe_split_policy_backup *policy)
{
	int i;

	if (!dc->config.is_vmin_only_asic) {
		policy->mpc_policy = dc->debug.pipe_split_policy;
		dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}
	policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
	dc->debug.enable_single_display_2to1_odm_policy = false;
	policy->subvp_policy = dc->debug.force_disable_subvp;
	dc->debug.force_disable_subvp = true;
	for (i = 0; i < context->stream_count; i++) {
		policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
		if (context->streams[i]->debug.allow_transition_for_forced_odm)
			context->streams[i]->debug.force_odm_combine_segments = 0;
	}
}

/* restore_minimal_pipe_split_policy() - Undo
 * backup_and_set_minimal_pipe_split_policy() using the saved @policy.
 */
static void restore_minimal_pipe_split_policy(struct dc *dc,
		struct dc_state *context,
		struct pipe_split_policy_backup *policy)
{
	uint8_t i;

	if (!dc->config.is_vmin_only_asic)
		dc->debug.pipe_split_policy = policy->mpc_policy;
	dc->debug.enable_single_display_2to1_odm_policy =
		policy->dynamic_odm_policy;
	dc->debug.force_disable_subvp = policy->subvp_policy;
	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
}

/**
 * update_planes_and_stream_state() - The function takes planes and stream
 * updates as inputs and determines the appropriate update type. If update type
 * is FULL, the function allocates a new context, populates and validates it.
 * Otherwise, it updates current dc context. The function will return both
 * new_context and new_update_type back to the caller. The function also backs
 * up both current and new contexts into corresponding dc state scratch memory.
 * TODO: The function does too many things, and even conditionally allocates dc
 * context memory implicitly. We should consider to break it down.
 *
 * @dc: Current DC state
 * @srf_updates: an array of surface updates
 * @surface_count: surface update count
 * @stream: Corresponding stream to be updated
 * @stream_update: stream update
 * @new_update_type: [out] determined update type by the function
 * @new_context: [out] new context allocated and validated if update type is
 * FULL, reference to current context if update type is less than FULL.
 *
 * Return: true if a valid update is populated into new_context, false
 * otherwise.
3576 */ 3577 static bool update_planes_and_stream_state(struct dc *dc, 3578 struct dc_surface_update *srf_updates, int surface_count, 3579 struct dc_stream_state *stream, 3580 struct dc_stream_update *stream_update, 3581 enum surface_update_type *new_update_type, 3582 struct dc_state **new_context) 3583 { 3584 struct dc_state *context; 3585 int i, j; 3586 enum surface_update_type update_type; 3587 const struct dc_stream_status *stream_status; 3588 struct dc_context *dc_ctx = dc->ctx; 3589 3590 stream_status = dc_stream_get_status(stream); 3591 3592 if (!stream_status) { 3593 if (surface_count) /* Only an error condition if surf_count non-zero*/ 3594 ASSERT(false); 3595 3596 return false; /* Cannot commit surface to stream that is not committed */ 3597 } 3598 3599 context = dc->current_state; 3600 update_type = dc_check_update_surfaces_for_stream( 3601 &dc->check_config, srf_updates, surface_count, stream_update).update_type; 3602 if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) 3603 update_type = UPDATE_TYPE_FULL; 3604 3605 /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. 3606 * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip 3607 * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. 
3608 */ 3609 force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); 3610 if (update_type == UPDATE_TYPE_FULL) 3611 backup_planes_and_stream_state(&dc->scratch.current_state, stream); 3612 3613 /* update current stream with the new updates */ 3614 copy_stream_update_to_stream(dc, context, stream, stream_update); 3615 3616 /* do not perform surface update if surface has invalid dimensions 3617 * (all zero) and no scaling_info is provided 3618 */ 3619 if (surface_count > 0) { 3620 for (i = 0; i < surface_count; i++) { 3621 if ((srf_updates[i].surface->src_rect.width == 0 || 3622 srf_updates[i].surface->src_rect.height == 0 || 3623 srf_updates[i].surface->dst_rect.width == 0 || 3624 srf_updates[i].surface->dst_rect.height == 0) && 3625 (!srf_updates[i].scaling_info || 3626 srf_updates[i].scaling_info->src_rect.width == 0 || 3627 srf_updates[i].scaling_info->src_rect.height == 0 || 3628 srf_updates[i].scaling_info->dst_rect.width == 0 || 3629 srf_updates[i].scaling_info->dst_rect.height == 0)) { 3630 DC_ERROR("Invalid src/dst rects in surface update!\n"); 3631 return false; 3632 } 3633 } 3634 } 3635 3636 if (update_type == UPDATE_TYPE_FULL) { 3637 if (stream_update) { 3638 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 3639 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 3640 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 3641 } 3642 for (i = 0; i < surface_count; i++) 3643 srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF; 3644 } 3645 3646 if (update_type >= update_surface_trace_level) 3647 update_surface_trace(dc, srf_updates, surface_count); 3648 3649 for (i = 0; i < surface_count; i++) 3650 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]); 3651 3652 if (update_type >= UPDATE_TYPE_FULL) { 3653 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3654 3655 for (i = 0; i < surface_count; i++) 3656 new_planes[i] = srf_updates[i].surface; 3657 3658 /* initialize scratch memory 
for building context */ 3659 context = dc_state_create_copy(dc->current_state); 3660 if (context == NULL) { 3661 DC_ERROR("Failed to allocate new validate context!\n"); 3662 return false; 3663 } 3664 3665 /* For each full update, remove all existing phantom pipes first. 3666 * Ensures that we have enough pipes for newly added MPO planes 3667 */ 3668 dc_state_remove_phantom_streams_and_planes(dc, context); 3669 dc_state_release_phantom_streams_and_planes(dc, context); 3670 3671 /*remove old surfaces from context */ 3672 if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) { 3673 3674 BREAK_TO_DEBUGGER(); 3675 goto fail; 3676 } 3677 3678 /* add surface to context */ 3679 if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3680 3681 BREAK_TO_DEBUGGER(); 3682 goto fail; 3683 } 3684 } 3685 3686 /* save update parameters into surface */ 3687 for (i = 0; i < surface_count; i++) { 3688 struct dc_plane_state *surface = srf_updates[i].surface; 3689 3690 if (update_type != UPDATE_TYPE_MED) 3691 continue; 3692 if (surface->update_flags.bits.position_change) { 3693 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3694 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3695 3696 if (pipe_ctx->plane_state != surface) 3697 continue; 3698 3699 resource_build_scaling_params(pipe_ctx); 3700 } 3701 } 3702 } 3703 3704 if (update_type == UPDATE_TYPE_FULL) { 3705 struct pipe_split_policy_backup policy; 3706 bool minimize = false; 3707 3708 if (dc->check_config.deferred_transition_state) { 3709 if (dc->check_config.transition_countdown_to_steady_state) { 3710 /* During countdown, all new contexts created as minimal transition states */ 3711 minimize = true; 3712 } else { 3713 dc->check_config.deferred_transition_state = false; 3714 } 3715 } 3716 3717 if (minimize) 3718 backup_and_set_minimal_pipe_split_policy(dc, context, &policy); 3719 3720 if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != 
DC_OK) { 3721 if (minimize) 3722 restore_minimal_pipe_split_policy(dc, context, &policy); 3723 BREAK_TO_DEBUGGER(); 3724 goto fail; 3725 } 3726 3727 if (minimize) 3728 restore_minimal_pipe_split_policy(dc, context, &policy); 3729 } 3730 update_seamless_boot_flags(dc, context, surface_count, stream); 3731 3732 *new_context = context; 3733 *new_update_type = update_type; 3734 if (update_type == UPDATE_TYPE_FULL) 3735 backup_planes_and_stream_state(&dc->scratch.new_state, stream); 3736 3737 return true; 3738 3739 fail: 3740 dc_state_release(context); 3741 3742 return false; 3743 3744 } 3745 3746 static void commit_planes_do_stream_update(struct dc *dc, 3747 struct dc_stream_state *stream, 3748 struct dc_stream_update *stream_update, 3749 enum surface_update_type update_type, 3750 struct dc_state *context) 3751 { 3752 int j; 3753 3754 // Stream updates 3755 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3756 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3757 3758 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) { 3759 3760 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3761 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3762 3763 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3764 stream_update->vrr_infopacket || 3765 stream_update->vsc_infopacket || 3766 stream_update->vsp_infopacket || 3767 stream_update->hfvsif_infopacket || 3768 stream_update->adaptive_sync_infopacket || 3769 stream_update->vtem_infopacket || 3770 stream_update->avi_infopacket) { 3771 resource_build_info_frame(pipe_ctx); 3772 dc->hwss.update_info_frame(pipe_ctx); 3773 3774 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3775 dc->link_srv->dp_trace_source_sequence( 3776 pipe_ctx->stream->link, 3777 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3778 } 3779 3780 if (stream_update->hdr_static_metadata && 3781 stream->use_dynamic_meta && 3782 dc->hwss.set_dmdata_attributes && 3783 
pipe_ctx->stream->dmdata_address.quad_part != 0) 3784 dc->hwss.set_dmdata_attributes(pipe_ctx); 3785 3786 if (stream_update->gamut_remap) 3787 dc_stream_set_gamut_remap(dc, stream); 3788 3789 if (stream_update->output_csc_transform) 3790 dc_stream_program_csc_matrix(dc, stream); 3791 3792 if (stream_update->dither_option) { 3793 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3794 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3795 &pipe_ctx->stream->bit_depth_params); 3796 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3797 &stream->bit_depth_params, 3798 &stream->clamping); 3799 while (odm_pipe) { 3800 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3801 &stream->bit_depth_params, 3802 &stream->clamping); 3803 odm_pipe = odm_pipe->next_odm_pipe; 3804 } 3805 } 3806 3807 if (stream_update->cursor_attributes) 3808 program_cursor_attributes(dc, stream); 3809 3810 if (stream_update->cursor_position) 3811 program_cursor_position(dc, stream); 3812 3813 /* Full fe update*/ 3814 if (update_type == UPDATE_TYPE_FAST) 3815 continue; 3816 3817 if (stream_update->dsc_config) 3818 dc->link_srv->update_dsc_config(pipe_ctx); 3819 3820 if (stream_update->mst_bw_update) { 3821 if (stream_update->mst_bw_update->is_increase) 3822 dc->link_srv->increase_mst_payload(pipe_ctx, 3823 stream_update->mst_bw_update->mst_stream_bw); 3824 else 3825 dc->link_srv->reduce_mst_payload(pipe_ctx, 3826 stream_update->mst_bw_update->mst_stream_bw); 3827 } 3828 3829 if (stream_update->pending_test_pattern) { 3830 /* 3831 * test pattern params depends on ODM topology 3832 * changes that we could be applying to front 3833 * end. Since at the current stage front end 3834 * changes are not yet applied. We can only 3835 * apply test pattern in hw based on current 3836 * state and populate the final test pattern 3837 * params in new state. 
If current and new test 3838 * pattern params are different as result of 3839 * different ODM topology being used, it will be 3840 * detected and handle during front end 3841 * programming update. 3842 */ 3843 dc->link_srv->dp_set_test_pattern(stream->link, 3844 stream->test_pattern.type, 3845 stream->test_pattern.color_space, 3846 stream->test_pattern.p_link_settings, 3847 stream->test_pattern.p_custom_pattern, 3848 stream->test_pattern.cust_pattern_size); 3849 resource_build_test_pattern_params(&context->res_ctx, pipe_ctx); 3850 } 3851 3852 if (stream_update->dpms_off) { 3853 if (*stream_update->dpms_off) { 3854 dc->link_srv->set_dpms_off(pipe_ctx); 3855 /* for dpms, keep acquired resources*/ 3856 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3857 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3858 3859 dc->optimized_required = true; 3860 3861 } else { 3862 if (get_seamless_boot_stream_count(context) == 0) 3863 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3864 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3865 } 3866 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space 3867 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) { 3868 /* 3869 * Workaround for firmware issue in some receivers where they don't pick up 3870 * correct output color space unless DP link is disabled/re-enabled 3871 */ 3872 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3873 } 3874 3875 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3876 bool should_program_abm = true; 3877 3878 // if otg funcs defined check if blanked before programming 3879 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3880 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3881 should_program_abm = false; 3882 3883 if (should_program_abm) { 3884 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3885 
					dc->hwss.set_abm_immediate_disable(pipe_ctx);
				} else {
					pipe_ctx->stream_res.abm->funcs->set_abm_level(
						pipe_ctx->stream_res.abm, stream->abm_level);
				}
			}
		}
	}
}

/*
 * Decide whether dirty-rectangle updates should be forwarded to DMUB
 * firmware for this stream.  True when any of the features that consume
 * dirty rectangles is in play: PSR-SU/PSR1 on DCN3.1+, Panel Replay,
 * or ABM on DCN3.5+.  @dc is currently unused but kept for API symmetry
 * with the callers.
 */
static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
{
	(void)dc;
	if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
			|| stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
			&& stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (stream->link->replay_settings.config.replay_supported)
		return true;

	if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
		return true;

	return false;
}

/**
 * dc_dmub_update_dirty_rect() - Send dirty-rectangle info to DMUB immediately
 *
 * @dc: Current DC state
 * @surface_count: Number of entries in @srf_updates
 * @stream: Stream the surfaces belong to
 * @srf_updates: Array of surface updates carrying flip addresses / dirty rects
 * @context: State whose pipe_ctx array is scanned for matching pipes
 *
 * Builds one DMUB_CMD__UPDATE_DIRTY_RECT command per surface and fires it
 * (no-wait) once for every pipe in @context that carries this stream/plane
 * pair.  Surfaces flipping in immediate mode are skipped.  Bails out early
 * when dirty rects are not needed (see dc_dmub_should_send_dirty_rect_cmd)
 * or, for the v1 command path, when no eDP panel instance can be resolved.
 */
void dc_dmub_update_dirty_rect(struct dc *dc,
			       int surface_count,
			       struct dc_stream_state *stream,
			       const struct dc_surface_update *srf_updates,
			       struct dc_state *context)
{
	union dmub_rb_cmd cmd;
	struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
	unsigned int i, j;
	unsigned int panel_inst = 0;

	if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
		return;

	/* v1 commands address a specific eDP panel; without one there is
	 * nothing to target.  v2 does not need the panel instance up front.
	 */
	if (!dc->config.frame_update_cmd_version2 && !dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
		return;

	memset(&cmd, 0x0, sizeof(cmd));
	cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
	cmd.update_dirty_rect.header.sub_type = 0;
	cmd.update_dirty_rect.header.payload_bytes =
		sizeof(cmd.update_dirty_rect) -
		sizeof(cmd.update_dirty_rect.header);
	update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;
		const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;

		if (!srf_updates[i].surface || !flip_addr)
			continue;
		/* Do not send in immediate flip mode */
		if (srf_updates[i].surface->flip_immediate)
			continue;

		if (dc->config.frame_update_cmd_version2)
			update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_2;
		else
			update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_1;

		update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
		memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
				sizeof(flip_addr->dirty_rects));
		/* One command per pipe that hosts this stream/plane pair */
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;
			if (pipe_ctx->plane_state != plane_state)
				continue;

			update_dirty_rect->panel_inst = panel_inst;
			update_dirty_rect->pipe_idx = j;
			update_dirty_rect->otg_inst = pipe_ctx->stream_res.tg->inst;
			dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
		}
	}
}

/*
 * Same command construction as dc_dmub_update_dirty_rect(), but instead of
 * sending immediately the commands are appended to @dc_dmub_cmd so the
 * caller can execute them later (while the OTG lock is held — see
 * build_dmub_cmd_list()).  @dmub_cmd_count is advanced per queued command.
 */
static void build_dmub_update_dirty_rect(
	struct dc *dc,
	int surface_count,
	struct dc_stream_state *stream,
	struct dc_surface_update *srf_updates,
	struct dc_state *context,
	struct dc_dmub_cmd dc_dmub_cmd[],
	unsigned int *dmub_cmd_count)
{
	union dmub_rb_cmd cmd;
	struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
	unsigned int i, j;
	unsigned int panel_inst = 0;

	if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
		return;

	if (!dc->config.frame_update_cmd_version2 && !dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
		return;

	memset(&cmd, 0x0, sizeof(cmd));
	cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
	cmd.update_dirty_rect.header.sub_type = 0;
	cmd.update_dirty_rect.header.payload_bytes =
		sizeof(cmd.update_dirty_rect) -
		sizeof(cmd.update_dirty_rect.header);
	update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;
		const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;

		if (!srf_updates[i].surface || !flip_addr)
			continue;
		/* Do not send in immediate flip mode */
		if (srf_updates[i].surface->flip_immediate)
			continue;

		if (dc->config.frame_update_cmd_version2)
			update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_2;
		else
			update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_1;

		update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
		memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
				sizeof(flip_addr->dirty_rects));
		/* Queue (rather than send) one command per matching pipe */
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;
			if (pipe_ctx->plane_state != plane_state)
				continue;
			update_dirty_rect->panel_inst = panel_inst;
			update_dirty_rect->pipe_idx = j;
			update_dirty_rect->otg_inst = pipe_ctx->stream_res.tg->inst;
			dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
			dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
			(*dmub_cmd_count)++;
		}
	}
}

/*
 * Return true when the only update bit set in @update_flags is addr_update,
 * i.e. the surface update is a pure flip with no other programming needed.
 */
static bool check_address_only_update(union surface_update_flags update_flags)
{
	union surface_update_flags addr_only_update_flags;
	addr_only_update_flags.raw = 0;
	addr_only_update_flags.bits.addr_update = 1;

	return update_flags.bits.addr_update &&
			!(update_flags.raw & ~addr_only_update_flags.raw);
}

/**
 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
 *
 * @dc: Current DC state
 * @srf_updates: Array of surface updates
 * @surface_count: Number of surfaces that have been updated
 * @stream: Corresponding stream to be updated in the current flip
 * @context: New DC state to be programmed
 *
 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
 *
 * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required
 * to build an array of commands and have them sent while the OTG lock is acquired.
 *
 * Return: void
 */
static void build_dmub_cmd_list(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_state *context,
		struct dc_dmub_cmd dc_dmub_cmd[],
		unsigned int *dmub_cmd_count)
{
	// Initialize cmd count to 0
	*dmub_cmd_count = 0;
	/* Currently only dirty-rect commands are queued here; more command
	 * builders can be chained after this call.
	 */
	build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
}

/*
 * Fast-flip path when the flip can be offloaded to FAMS2 firmware:
 * send dirty rects, program the new plane addresses on every pipe that
 * hosts one of the updated surfaces, then hand the flip to DMCUB via
 * dc_dmub_srv_fams2_passthrough_flip().
 */
static void commit_plane_for_stream_offload_fams2_flip(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i, j;

	/* update dirty rect for PSR */
	dc_dmub_update_dirty_rect(dc, surface_count, stream,
			srf_updates, context);

	/* Perform requested Updates */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;

		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
				continue;

			if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
				continue;

			/* update pipe context for plane */
			if (pipe_ctx->plane_state->update_flags.bits.addr_update)
				dc->hwss.update_plane_addr(dc, pipe_ctx);
		}
	}

	/* Send commands to DMCUB */
	dc_dmub_srv_fams2_passthrough_flip(dc,
			context,
			stream,
			srf_updates,
			surface_count);
}

static void
commit_planes_for_stream_fast(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	/*
	 * Fast commit path: either offload the flip entirely to FAMS2
	 * firmware (address-only updates with FAMS2 enabled), or build a
	 * DMCUB command list plus a hw-sequencer "fast sequence" and execute
	 * it.  Does not reprogram the front end.
	 */
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;
	struct dc_stream_status *stream_status = NULL;
	bool should_offload_fams2_flip = false;
	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);

	if (should_lock_all_pipes)
		determine_pipe_unlock_order(dc, context);

	if (dc->debug.fams2_config.bits.enable &&
			dc->debug.fams2_config.bits.enable_offload_flip &&
			dc_state_is_fams2_in_use(dc, context)) {
		/* if not offloading to HWFQ, offload to FAMS2 if needed */
		should_offload_fams2_flip = true;
		for (i = 0; i < surface_count; i++) {
			if (srf_updates[i].surface &&
					srf_updates[i].surface->update_flags.raw &&
					!check_address_only_update(srf_updates[i].surface->update_flags)) {
				/* more than address update, need to acquire FAMS2 lock */
				should_offload_fams2_flip = false;
				break;
			}
		}
		if (stream_update) {
			/* more than address update, need to acquire FAMS2 lock */
			should_offload_fams2_flip = false;
		}
	}

	/* Hardware may be in IPS/Z10 low-power state; wake it before touching
	 * registers.
	 */
	dc_exit_ips_for_hw_access(dc);

	dc_z10_restore(dc);

	top_pipe_to_program = resource_get_otg_master_for_stream(
			&context->res_ctx,
			stream);

	if (!top_pipe_to_program)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->plane_state) {
			if (!dc->debug.using_dml2)
				set_p_state_switch_method(dc, context, pipe);

			if (dc->debug.visual_confirm)
				dc_update_visual_confirm_color(dc, context, pipe);
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;
		/*set logical flag for lock/unlock use*/
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (!pipe_ctx->plane_state)
				continue;
			if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
				continue;

			pipe_ctx->plane_state->triplebuffer_flips = false;
			if (update_type == UPDATE_TYPE_FAST &&
					dc->hwss.program_triplebuffer != NULL &&
					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
				/*triple buffer for VUpdate only*/
				pipe_ctx->plane_state->triplebuffer_flips = true;
			}
		}
	}

	stream_status = dc_state_get_stream_status(context, stream);

	if (should_offload_fams2_flip) {
		commit_plane_for_stream_offload_fams2_flip(dc,
				srf_updates,
				surface_count,
				stream,
				context);
	} else if (stream_status) {
		/* Build the DMCUB commands and the block sequence up front so
		 * they can all run while the OTG lock is held.
		 */
		build_dmub_cmd_list(dc,
				srf_updates,
				surface_count,
				stream,
				context,
				context->dc_dmub_cmd,
				&(context->dmub_cmd_count));
		hwss_build_fast_sequence(dc,
				context->dc_dmub_cmd,
				context->dmub_cmd_count,
				context->block_sequence,
				&(context->block_sequence_steps),
				top_pipe_to_program,
				stream_status,
				context);
		hwss_execute_sequence(dc,
				context->block_sequence,
				context->block_sequence_steps);
	}

	/* Clear update flags so next flip doesn't have redundant programming
	 * (if there's no stream update, the update flags are not cleared).
	 * Surface updates are cleared unconditionally at the beginning of each flip,
	 * so no need to clear here.
	 */
	if (top_pipe_to_program->stream)
		top_pipe_to_program->stream->update_flags.raw = 0;
}

/*
 * Full commit path for a stream: locks pipes, applies stream updates,
 * reprograms the front end as needed for FULL updates, performs FAST
 * flips, and handles SubVP phantom pipe enable/disable around the
 * front-end programming.  Statement order here mirrors the hardware
 * programming model and must not be reordered casually.
 */
static void commit_planes_for_stream(struct dc *dc,
		const struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;
	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
	bool subvp_prev_use = false;
	bool subvp_curr_use = false;
	uint8_t current_stream_mask = 0;

	if (should_lock_all_pipes)
		determine_pipe_unlock_order(dc, context);
	// Once we apply the new subvp context to hardware it won't be in the
	// dc->current_state anymore, so we have to cache it before we apply
	// the new SubVP context
	subvp_prev_use = false;
	dc_exit_ips_for_hw_access(dc);

	dc_z10_restore(dc);
	if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
		hwss_process_outstanding_hw_updates(dc, dc->current_state);

	if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
		dc->res_pool->funcs->prepare_mcache_programming(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->plane_state) {
			if (!dc->debug.using_dml2)
				set_p_state_switch_method(dc, context, pipe);

			if (dc->debug.visual_confirm)
				dc_update_visual_confirm_color(dc, context, pipe);
		}
	}

	if (update_type == UPDATE_TYPE_FULL) {
		/* Raise clocks/bandwidth before reprogramming pipes */
		dc_allow_idle_optimizations(dc, false);

		if (get_seamless_boot_stream_count(context) == 0)
			dc->hwss.prepare_bandwidth(dc, context);

		if (dc->hwss.update_dsc_pg)
			dc->hwss.update_dsc_pg(dc, context, false);

		context_clock_trace(dc, context);
4283 } 4284 4285 if (update_type == UPDATE_TYPE_FULL) 4286 hwss_wait_for_outstanding_hw_updates(dc, dc->current_state); 4287 4288 top_pipe_to_program = resource_get_otg_master_for_stream( 4289 &context->res_ctx, 4290 stream); 4291 ASSERT(top_pipe_to_program != NULL); 4292 4293 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4294 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4295 4296 // Check old context for SubVP 4297 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 4298 if (subvp_prev_use) 4299 break; 4300 } 4301 4302 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4303 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 4304 4305 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 4306 subvp_curr_use = true; 4307 break; 4308 } 4309 } 4310 4311 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 4312 struct pipe_ctx *mpcc_pipe; 4313 struct pipe_ctx *odm_pipe; 4314 4315 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 4316 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 4317 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 4318 } 4319 4320 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4321 if (top_pipe_to_program && 4322 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 4323 if (should_use_dmub_inbox1_lock(dc, stream->link)) { 4324 union dmub_hw_lock_flags hw_locks = { 0 }; 4325 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 4326 4327 hw_locks.bits.lock_dig = 1; 4328 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 4329 4330 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 4331 true, 4332 &hw_locks, 4333 &inst_flags); 4334 } else 4335 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 4336 top_pipe_to_program->stream_res.tg); 4337 } 4338 4339 if (dc->hwss.wait_for_dcc_meta_propagation) { 4340 
dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program); 4341 } 4342 4343 if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) 4344 dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type < UPDATE_TYPE_FULL); 4345 4346 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4347 if (dc->hwss.subvp_pipe_control_lock) 4348 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 4349 4350 if (dc->hwss.dmub_hw_control_lock) 4351 dc->hwss.dmub_hw_control_lock(dc, context, true); 4352 4353 dc->hwss.interdependent_update_lock(dc, context, true); 4354 } else { 4355 if (dc->hwss.subvp_pipe_control_lock) 4356 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 4357 4358 if (dc->hwss.dmub_hw_control_lock) 4359 dc->hwss.dmub_hw_control_lock(dc, context, true); 4360 4361 /* Lock the top pipe while updating plane addrs, since freesync requires 4362 * plane addr update event triggers to be synchronized. 4363 * top_pipe_to_program is expected to never be NULL 4364 */ 4365 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 4366 } 4367 4368 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 4369 4370 // Stream updates 4371 if (stream_update) 4372 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 4373 4374 if (surface_count == 0) { 4375 /* 4376 * In case of turning off screen, no need to program front end a second time. 4377 * just return after program blank. 
4378 */ 4379 if (dc->hwss.apply_ctx_for_surface) 4380 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 4381 if (dc->hwss.program_front_end_for_ctx) 4382 dc->hwss.program_front_end_for_ctx(dc, context); 4383 4384 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4385 dc->hwss.interdependent_update_lock(dc, context, false); 4386 } else { 4387 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 4388 } 4389 dc->hwss.post_unlock_program_front_end(dc, context); 4390 4391 if (update_type != UPDATE_TYPE_FAST) 4392 if (dc->hwss.commit_subvp_config) 4393 dc->hwss.commit_subvp_config(dc, context); 4394 4395 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 4396 * move the SubVP lock to after the phantom pipes have been setup 4397 */ 4398 if (dc->hwss.subvp_pipe_control_lock) 4399 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, 4400 NULL, subvp_prev_use); 4401 4402 if (dc->hwss.dmub_hw_control_lock) 4403 dc->hwss.dmub_hw_control_lock(dc, context, false); 4404 return; 4405 } 4406 4407 if (update_type != UPDATE_TYPE_FAST) { 4408 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4409 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4410 4411 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || 4412 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && 4413 pipe_ctx->stream && pipe_ctx->plane_state) { 4414 /* Only update visual confirm for SUBVP and Mclk switching here. 4415 * The bar appears on all pipes, so we need to update the bar on all displays, 4416 * so the information doesn't get stale. 
4417 */ 4418 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, 4419 pipe_ctx->plane_res.hubp->inst); 4420 } 4421 } 4422 } 4423 4424 for (i = 0; i < surface_count; i++) { 4425 struct dc_plane_state *plane_state = srf_updates[i].surface; 4426 4427 /*set logical flag for lock/unlock use*/ 4428 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4429 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4430 if (!pipe_ctx->plane_state) 4431 continue; 4432 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4433 continue; 4434 pipe_ctx->plane_state->triplebuffer_flips = false; 4435 if (update_type == UPDATE_TYPE_FAST && 4436 dc->hwss.program_triplebuffer != NULL && 4437 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 4438 /*triple buffer for VUpdate only*/ 4439 pipe_ctx->plane_state->triplebuffer_flips = true; 4440 } 4441 } 4442 if (update_type == UPDATE_TYPE_FULL) { 4443 /* force vsync flip when reconfiguring pipes to prevent underflow */ 4444 plane_state->flip_immediate = false; 4445 plane_state->triplebuffer_flips = false; 4446 } 4447 } 4448 4449 // Update Type FULL, Surface updates 4450 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4451 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4452 4453 if (!pipe_ctx->top_pipe && 4454 !pipe_ctx->prev_odm_pipe && 4455 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 4456 struct dc_stream_status *stream_status = NULL; 4457 4458 if (!pipe_ctx->plane_state) 4459 continue; 4460 4461 /* Full fe update*/ 4462 if (update_type == UPDATE_TYPE_FAST) 4463 continue; 4464 4465 stream_status = 4466 stream_get_status(context, pipe_ctx->stream); 4467 4468 if (dc->hwss.apply_ctx_for_surface && stream_status) 4469 dc->hwss.apply_ctx_for_surface( 4470 dc, pipe_ctx->stream, stream_status->plane_count, context); 4471 } 4472 } 4473 4474 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4475 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4476 4477 if (!pipe_ctx->plane_state) 
4478 continue; 4479 4480 /* Full fe update*/ 4481 if (update_type == UPDATE_TYPE_FAST) 4482 continue; 4483 4484 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 4485 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 4486 /*turn off triple buffer for full update*/ 4487 dc->hwss.program_triplebuffer( 4488 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 4489 } 4490 } 4491 4492 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 4493 dc->hwss.program_front_end_for_ctx(dc, context); 4494 4495 //Pipe busy until some frame and line # 4496 if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) { 4497 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4498 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4499 4500 dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx); 4501 } 4502 } 4503 4504 if (dc->debug.validate_dml_output) { 4505 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4506 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 4507 if (cur_pipe->stream == NULL) 4508 continue; 4509 4510 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 4511 cur_pipe->plane_res.hubp, dc->ctx, 4512 &context->res_ctx.pipe_ctx[i].rq_regs, 4513 &context->res_ctx.pipe_ctx[i].dlg_regs, 4514 &context->res_ctx.pipe_ctx[i].ttu_regs); 4515 } 4516 } 4517 } 4518 4519 // Update Type FAST, Surface updates 4520 if (update_type == UPDATE_TYPE_FAST) { 4521 if (dc->hwss.set_flip_control_gsl) 4522 for (i = 0; i < surface_count; i++) { 4523 struct dc_plane_state *plane_state = srf_updates[i].surface; 4524 4525 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4526 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4527 4528 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 4529 continue; 4530 4531 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4532 continue; 4533 4534 // GSL has to be used for flip immediate 4535 
dc->hwss.set_flip_control_gsl(pipe_ctx, 4536 pipe_ctx->plane_state->flip_immediate); 4537 } 4538 } 4539 4540 /* Perform requested Updates */ 4541 for (i = 0; i < surface_count; i++) { 4542 struct dc_plane_state *plane_state = srf_updates[i].surface; 4543 4544 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4545 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4546 4547 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 4548 continue; 4549 4550 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4551 continue; 4552 4553 if (srf_updates[i].cm && 4554 srf_updates[i].cm->flags.bits.lut3d_enable && 4555 srf_updates[i].cm->flags.bits.lut3d_dma_enable && 4556 dc->hwss.trigger_3dlut_dma_load) 4557 dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx); 4558 4559 /*program triple buffer after lock based on flip type*/ 4560 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 4561 /*only enable triplebuffer for fast_update*/ 4562 dc->hwss.program_triplebuffer( 4563 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 4564 } 4565 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 4566 dc->hwss.update_plane_addr(dc, pipe_ctx); 4567 } 4568 } 4569 } 4570 4571 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4572 dc->hwss.interdependent_update_lock(dc, context, false); 4573 } else { 4574 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 4575 } 4576 4577 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4578 if (top_pipe_to_program && 4579 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 4580 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4581 top_pipe_to_program->stream_res.tg, 4582 CRTC_STATE_VACTIVE); 4583 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4584 top_pipe_to_program->stream_res.tg, 4585 CRTC_STATE_VBLANK); 4586 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4587 
top_pipe_to_program->stream_res.tg, 4588 CRTC_STATE_VACTIVE); 4589 4590 if (should_use_dmub_inbox1_lock(dc, stream->link)) { 4591 union dmub_hw_lock_flags hw_locks = { 0 }; 4592 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 4593 4594 hw_locks.bits.lock_dig = 1; 4595 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 4596 4597 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 4598 false, 4599 &hw_locks, 4600 &inst_flags); 4601 } else 4602 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( 4603 top_pipe_to_program->stream_res.tg); 4604 } 4605 4606 if (subvp_curr_use) { 4607 /* If enabling subvp or transitioning from subvp->subvp, enable the 4608 * phantom streams before we program front end for the phantom pipes. 4609 */ 4610 if (update_type != UPDATE_TYPE_FAST) { 4611 if (dc->hwss.enable_phantom_streams) 4612 dc->hwss.enable_phantom_streams(dc, context); 4613 } 4614 } 4615 4616 if (update_type != UPDATE_TYPE_FAST) 4617 dc->hwss.post_unlock_program_front_end(dc, context); 4618 4619 if (subvp_prev_use && !subvp_curr_use) { 4620 /* If disabling subvp, disable phantom streams after front end 4621 * programming has completed (we turn on phantom OTG in order 4622 * to complete the plane disable for phantom pipes). 
4623 */ 4624 4625 if (dc->hwss.disable_phantom_streams) 4626 dc->hwss.disable_phantom_streams(dc, context); 4627 } 4628 4629 if (update_type != UPDATE_TYPE_FAST) 4630 if (dc->hwss.commit_subvp_config) 4631 dc->hwss.commit_subvp_config(dc, context); 4632 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 4633 * move the SubVP lock to after the phantom pipes have been setup 4634 */ 4635 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4636 if (dc->hwss.subvp_pipe_control_lock) 4637 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 4638 if (dc->hwss.dmub_hw_control_lock) 4639 dc->hwss.dmub_hw_control_lock(dc, context, false); 4640 } else { 4641 if (dc->hwss.subvp_pipe_control_lock) 4642 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 4643 if (dc->hwss.dmub_hw_control_lock) 4644 dc->hwss.dmub_hw_control_lock(dc, context, false); 4645 } 4646 4647 // Fire manual trigger only when bottom plane is flipped 4648 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4649 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4650 4651 if (!pipe_ctx->plane_state) 4652 continue; 4653 4654 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 4655 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || 4656 !pipe_ctx->plane_state->update_flags.bits.addr_update || 4657 pipe_ctx->plane_state->skip_manual_trigger) 4658 continue; 4659 4660 if (dc->hwss.program_cursor_offload_now) 4661 dc->hwss.program_cursor_offload_now(dc, pipe_ctx); 4662 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) 4663 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 4664 } 4665 4666 current_stream_mask = get_stream_mask(dc, context); 4667 if (current_stream_mask != context->stream_mask) { 4668 context->stream_mask = current_stream_mask; 4669 
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
	}
}

/**
 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
 *
 * @dc: Used to get the current state status
 * @stream: Target stream, which we want to remove the attached planes
 * @srf_updates: Array of surface updates
 * @surface_count: Number of surface updates
 * @is_plane_addition: [out] Fill out with true if it is a plane addition case
 *
 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
 * the MPO if used simultaneously in some specific configurations (e.g.,
 * 4k@144). This function checks if the incoming context requires applying a
 * transition state with unnecessary pipe splitting and ODM disabled to
 * circumvent our hardware limitations to prevent this edge case. If the OPP
 * associated with an MPCC might change due to plane additions, this function
 * returns true.
 *
 * Return:
 * Return true if OPP and MPCC might change, otherwise, return false.
 */
static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_surface_update *srf_updates,
		int surface_count,
		bool *is_plane_addition)
{
	(void)srf_updates;

	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
	bool force_minimal_pipe_splitting = false;
	bool subvp_active = false;
	uint32_t i;

	*is_plane_addition = false;

	if (cur_stream_status &&
			dc->current_state->stream_count > 0 &&
			dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
		/* determine if minimal transition is required due to MPC*/
		if (surface_count > 0) {
			if (cur_stream_status->plane_count > surface_count) {
				force_minimal_pipe_splitting = true;
			} else if (cur_stream_status->plane_count < surface_count) {
				force_minimal_pipe_splitting = true;
				*is_plane_addition = true;
			}
		}
	}

	if (cur_stream_status &&
			dc->current_state->stream_count == 1 &&
			dc->debug.enable_single_display_2to1_odm_policy) {
		/* determine if minimal transition is required due to dynamic ODM*/
		if (surface_count > 0) {
			if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
				force_minimal_pipe_splitting = true;
			} else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
				force_minimal_pipe_splitting = true;
				*is_plane_addition = true;
			}
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
			subvp_active = true;
			break;
		}
	}

	/* For SubVP when adding or removing planes we need to add a minimal transition
	 * (even when disabling all planes). Whenever disabling a phantom pipe, we
	 * must use the minimal transition path to disable the pipe correctly.
	 *
	 * We want to use the minimal transition whenever subvp is active, not only if
	 * a plane is being added / removed from a subvp stream (MPO plane can be added
	 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through
	 * a min transition to disable subvp.
	 */
	if (cur_stream_status && subvp_active) {
		/* determine if minimal transition is required due to SubVP*/
		if (cur_stream_status->plane_count > surface_count) {
			force_minimal_pipe_splitting = true;
		} else if (cur_stream_status->plane_count < surface_count) {
			force_minimal_pipe_splitting = true;
			*is_plane_addition = true;
		}
	}

	return force_minimal_pipe_splitting;
}


/*
 * Undo the pipe split policy override made when the minimal transition
 * state was created, then drop the reference on the transition state.
 */
static void release_minimal_transition_state(struct dc *dc,
		struct dc_state *minimal_transition_context,
		struct dc_state *base_context,
		struct pipe_split_policy_backup *policy)
{
	restore_minimal_pipe_split_policy(dc, base_context, policy);
	dc_state_release(minimal_transition_context);
}

/*
 * Clear flip_immediate on every plane of every stream in @context so all
 * flips during the minimal transition happen on vsync (prevents underflow
 * while pipes are being reconfigured).
 */
static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context)
{
	uint8_t i;
	int j;
	struct dc_stream_status *stream_status;

	for (i = 0; i < context->stream_count; i++) {
		stream_status = &context->stream_status[i];

		for (j = 0; j < stream_status->plane_count; j++)
			stream_status->plane_states[j]->flip_immediate = false;
	}
}

/*
 * Create a copy of @base_context with the minimal pipe split policy
 * applied and bandwidth re-validated.  On success the caller owns the
 * returned state and must release it with release_minimal_transition_state()
 * (which also restores the policy saved in @policy).  Returns NULL on
 * allocation or validation failure.
 */
static struct dc_state *create_minimal_transition_state(struct dc *dc,
		struct dc_state *base_context, struct pipe_split_policy_backup *policy)
{
	struct dc_state *minimal_transition_context = NULL;

	minimal_transition_context = dc_state_create_copy(base_context);
	if (!minimal_transition_context)
		return NULL;

	backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
	/* commit minimal state */
	if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context,
			DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) {
		/* prevent underflow and corruption when reconfiguring pipes */
		force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
	} else {
		/*
		 * This should never happen, minimal transition state should
		 * always be validated first before adding pipe split features.
		 */
		release_minimal_transition_state(dc, minimal_transition_context, base_context, policy);
		BREAK_TO_DEBUGGER();
		minimal_transition_context = NULL;
	}
	return minimal_transition_context;
}

/*
 * True when both hops (initial -> intermediate and intermediate -> final)
 * are seamless pipe topology transitions according to the hw sequencer.
 */
static bool is_pipe_topology_transition_seamless_with_intermediate_step(
		struct dc *dc,
		struct dc_state *initial_state,
		struct dc_state *intermediate_state,
		struct dc_state *final_state)
{
	return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
			intermediate_state) &&
			dc->hwss.is_pipe_topology_transition_seamless(dc,
			intermediate_state, final_state);
}

/*
 * Install @new_context as dc->current_state and release the old one,
 * then clear force_full_update on the planes of @stream in the new
 * context.  Ordering of the swap is deliberate — see the comment below.
 */
static void swap_and_release_current_context(struct dc *dc,
		struct dc_state *new_context, struct dc_stream_state *stream)
{

	int i;
	struct dc_state *old = dc->current_state;
	struct pipe_ctx *pipe_ctx;

	/* Since memory free requires elevated IRQ, an interrupt
	 * request is generated by mem free. If this happens
	 * between freeing and reassigning the context, our vsync
	 * interrupt will call into dc and cause a memory
	 * corruption. Hence, we first reassign the context,
	 * then free the old context.
	 */
	dc->current_state = new_context;
	dc_state_release(old);

	// clear any forced full updates
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &new_context->res_ctx.pipe_ctx[i];

		if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
			pipe_ctx->plane_state->force_full_update = false;
	}
}

/*
 * Fill @srf_updates with one empty (surface-only) update per plane
 * currently attached to @stream.  Returns the number of entries written,
 * or 0 when the stream has no status.  The caller must size @srf_updates
 * for at least the stream's plane count.
 */
static int initialize_empty_surface_updates(
		struct dc_stream_state *stream,
		struct dc_surface_update *srf_updates)
{
	struct dc_stream_status *status = dc_stream_get_status(stream);
	int i;

	if (!status)
		return 0;

	for (i = 0; i < status->plane_count; i++)
		srf_updates[i].surface = status->plane_states[i];

	return status->plane_count;
}

/*
 * Attempt a minimal transition derived from @new_context: build the
 * intermediate state, and when both topology hops are seamless, commit it
 * as a FULL update and make it current.  The intermediate state is always
 * released before returning.  Returns true when the transition was
 * committed.
 */
static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
		struct dc_state *new_context,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_surface_update *srf_updates,
		int surface_count)
{
	bool success = false;
	struct pipe_split_policy_backup policy;
	struct dc_state *intermediate_context =
		create_minimal_transition_state(dc, new_context,
			&policy);

	if (intermediate_context) {
		if (is_pipe_topology_transition_seamless_with_intermediate_step(
				dc,
				dc->current_state,
				intermediate_context,
				new_context)) {
			DC_LOG_DC("commit minimal transition state: base = new state\n");
			commit_planes_for_stream(dc, srf_updates,
					surface_count, stream, stream_update,
					UPDATE_TYPE_FULL, intermediate_context);
			swap_and_release_current_context(
					dc, intermediate_context, stream);
			/* current_state was just swapped in; keep a reference
			 * for the caller's pending release.
			 */
			dc_state_retain(dc->current_state);
			success = true;
		}
		release_minimal_transition_state(
				dc, intermediate_context, new_context, &policy);
	}
	return success;
}

static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
struct dc_state *new_context, struct dc_stream_state *stream) 4909 { 4910 bool success = false; 4911 struct pipe_split_policy_backup policy; 4912 struct dc_state *intermediate_context; 4913 struct dc_state *old_current_state = dc->current_state; 4914 struct dc_surface_update srf_updates[MAX_SURFACES] = {0}; 4915 int surface_count; 4916 4917 /* 4918 * Both current and new contexts share the same stream and plane state 4919 * pointers. When new context is validated, stream and planes get 4920 * populated with new updates such as new plane addresses. This makes 4921 * the current context no longer valid because stream and planes are 4922 * modified from the original. We backup current stream and plane states 4923 * into scratch space whenever we are populating new context. So we can 4924 * restore the original values back by calling the restore function now. 4925 * This restores back the original stream and plane states associated 4926 * with the current state. 4927 */ 4928 restore_planes_and_stream_state(&dc->scratch.current_state, stream); 4929 dc_state_retain(old_current_state); 4930 intermediate_context = create_minimal_transition_state(dc, 4931 old_current_state, &policy); 4932 4933 if (intermediate_context) { 4934 if (is_pipe_topology_transition_seamless_with_intermediate_step( 4935 dc, 4936 dc->current_state, 4937 intermediate_context, 4938 new_context)) { 4939 DC_LOG_DC("commit minimal transition state: base = current state\n"); 4940 surface_count = initialize_empty_surface_updates( 4941 stream, srf_updates); 4942 commit_planes_for_stream(dc, srf_updates, 4943 surface_count, stream, NULL, 4944 UPDATE_TYPE_FULL, intermediate_context); 4945 swap_and_release_current_context( 4946 dc, intermediate_context, stream); 4947 dc_state_retain(dc->current_state); 4948 success = true; 4949 } 4950 release_minimal_transition_state(dc, intermediate_context, 4951 old_current_state, &policy); 4952 } 4953 dc_state_release(old_current_state); 4954 /* 4955 * Restore stream and 
plane states back to the values associated with 4956 * new context. 4957 */ 4958 restore_planes_and_stream_state(&dc->scratch.new_state, stream); 4959 return success; 4960 } 4961 4962 /** 4963 * commit_minimal_transition_state_in_dc_update - Commit a minimal state based 4964 * on current or new context 4965 * 4966 * @dc: DC structure, used to get the current state 4967 * @new_context: New context 4968 * @stream: Stream getting the update for the flip 4969 * @srf_updates: Surface updates 4970 * @surface_count: Number of surfaces 4971 * 4972 * The function takes in current state and new state and determine a minimal 4973 * transition state as the intermediate step which could make the transition 4974 * between current and new states seamless. If found, it will commit the minimal 4975 * transition state and update current state to this minimal transition state 4976 * and return true, if not, it will return false. 4977 * 4978 * Return: 4979 * Return True if the minimal transition succeeded, false otherwise 4980 */ 4981 static bool commit_minimal_transition_state_in_dc_update(struct dc *dc, 4982 struct dc_state *new_context, 4983 struct dc_stream_state *stream, 4984 struct dc_surface_update *srf_updates, 4985 int surface_count) 4986 { 4987 bool success = commit_minimal_transition_based_on_new_context( 4988 dc, new_context, stream, NULL, 4989 srf_updates, surface_count); 4990 if (!success) 4991 success = commit_minimal_transition_based_on_current_context(dc, 4992 new_context, stream); 4993 if (!success) 4994 DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n"); 4995 return success; 4996 } 4997 4998 /** 4999 * commit_minimal_transition_state - Create a transition pipe split state 5000 * 5001 * @dc: Used to get the current state status 5002 * @transition_base_context: New transition state 5003 * 5004 * In some specific configurations, such as pipe split on multi-display with 5005 * 
MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe 5006 * programming when moving to new planes. To mitigate those types of problems, 5007 * this function adds a transition state that minimizes pipe usage before 5008 * programming the new configuration. When adding a new plane, the current 5009 * state requires the least pipes, so it is applied without splitting. When 5010 * removing a plane, the new state requires the least pipes, so it is applied 5011 * without splitting. 5012 * 5013 * Return: 5014 * Return false if something is wrong in the transition state. 5015 */ 5016 static bool commit_minimal_transition_state(struct dc *dc, 5017 struct dc_state *transition_base_context) 5018 { 5019 struct dc_state *transition_context; 5020 struct pipe_split_policy_backup policy; 5021 enum dc_status ret = DC_ERROR_UNEXPECTED; 5022 unsigned int i, j; 5023 unsigned int pipe_in_use = 0; 5024 bool subvp_in_use = false; 5025 bool odm_in_use = false; 5026 5027 /* check current pipes in use*/ 5028 for (i = 0; i < dc->res_pool->pipe_count; i++) { 5029 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 5030 5031 if (pipe->plane_state) 5032 pipe_in_use++; 5033 } 5034 5035 /* If SubVP is enabled and we are adding or removing planes from any main subvp 5036 * pipe, we must use the minimal transition. 5037 */ 5038 for (i = 0; i < dc->res_pool->pipe_count; i++) { 5039 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 5040 5041 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { 5042 subvp_in_use = true; 5043 break; 5044 } 5045 } 5046 5047 /* If ODM is enabled and we are adding or removing planes from any ODM 5048 * pipe, we must use the minimal transition. 
5049 */ 5050 for (i = 0; i < dc->res_pool->pipe_count; i++) { 5051 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 5052 5053 if (resource_is_pipe_type(pipe, OTG_MASTER)) { 5054 odm_in_use = resource_get_odm_slice_count(pipe) > 1; 5055 break; 5056 } 5057 } 5058 5059 /* When the OS add a new surface if we have been used all of pipes with odm combine 5060 * and mpc split feature, it need use commit_minimal_transition_state to transition safely. 5061 * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need 5062 * call it again. Otherwise return true to skip. 5063 * 5064 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially 5065 * enter/exit MPO when DCN still have enough resources. 5066 */ 5067 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) 5068 return true; 5069 5070 DC_LOG_DC("%s base = %s state, reason = %s\n", __func__, 5071 dc->current_state == transition_base_context ? "current" : "new", 5072 subvp_in_use ? "Subvp In Use" : 5073 odm_in_use ? "ODM in Use" : 5074 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? 
"MPC in Use" : 5075 "Unknown"); 5076 5077 dc_state_retain(transition_base_context); 5078 transition_context = create_minimal_transition_state(dc, 5079 transition_base_context, &policy); 5080 if (transition_context) { 5081 ret = dc_commit_state_no_check(dc, transition_context); 5082 release_minimal_transition_state(dc, transition_context, transition_base_context, &policy); 5083 } 5084 dc_state_release(transition_base_context); 5085 5086 if (ret != DC_OK) { 5087 /* this should never happen */ 5088 BREAK_TO_DEBUGGER(); 5089 return false; 5090 } 5091 5092 /* force full surface update */ 5093 for (i = 0; i < dc->current_state->stream_count; i++) { 5094 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { 5095 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; 5096 } 5097 } 5098 5099 return true; 5100 } 5101 5102 void populate_fast_updates(struct dc_fast_update *fast_update, 5103 struct dc_surface_update *srf_updates, 5104 int surface_count, 5105 struct dc_stream_update *stream_update) 5106 { 5107 int i = 0; 5108 5109 if (stream_update) { 5110 fast_update[0].out_transfer_func = stream_update->out_transfer_func; 5111 fast_update[0].output_csc_transform = stream_update->output_csc_transform; 5112 } else { 5113 fast_update[0].out_transfer_func = NULL; 5114 fast_update[0].output_csc_transform = NULL; 5115 } 5116 5117 for (i = 0; i < surface_count; i++) { 5118 fast_update[i].flip_addr = srf_updates[i].flip_addr; 5119 fast_update[i].gamma = srf_updates[i].gamma; 5120 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; 5121 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; 5122 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; 5123 fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix; 5124 #if defined(CONFIG_DRM_AMD_DC_DCN4_2) 5125 fast_update[i].cm_hist_control = srf_updates[i].cm_hist_control; 5126 #endif 5127 } 5128 } 
5129 5130 static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count) 5131 { 5132 int i; 5133 5134 if (fast_update[0].out_transfer_func || 5135 fast_update[0].output_csc_transform) 5136 return true; 5137 5138 for (i = 0; i < surface_count; i++) { 5139 if (fast_update[i].flip_addr || 5140 fast_update[i].gamma || 5141 fast_update[i].gamut_remap_matrix || 5142 fast_update[i].input_csc_color_matrix || 5143 fast_update[i].cursor_csc_color_matrix || 5144 #if defined(CONFIG_DRM_AMD_DC_DCN4_2) 5145 fast_update[i].cm_hist_control || 5146 #endif 5147 fast_update[i].coeff_reduction_factor) 5148 return true; 5149 } 5150 5151 return false; 5152 } 5153 5154 bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count) 5155 { 5156 int i; 5157 5158 if (fast_update[0].out_transfer_func || 5159 fast_update[0].output_csc_transform) 5160 return true; 5161 5162 for (i = 0; i < surface_count; i++) { 5163 if (fast_update[i].input_csc_color_matrix || 5164 fast_update[i].gamma || 5165 fast_update[i].gamut_remap_matrix || 5166 fast_update[i].coeff_reduction_factor || 5167 #if defined(CONFIG_DRM_AMD_DC_DCN4_2) 5168 fast_update[i].cm_hist_control || 5169 #endif 5170 fast_update[i].cursor_csc_color_matrix) 5171 return true; 5172 } 5173 5174 return false; 5175 } 5176 5177 static bool full_update_required_weak( 5178 const struct dc *dc, 5179 const struct dc_surface_update *srf_updates, 5180 int surface_count, 5181 const struct dc_stream_update *stream_update, 5182 const struct dc_stream_state *stream) 5183 { 5184 (void)stream_update; 5185 const struct dc_state *context = dc->current_state; 5186 if (srf_updates) 5187 for (int i = 0; i < surface_count; i++) 5188 if (!is_surface_in_context(context, srf_updates[i].surface)) 5189 return true; 5190 5191 if (stream) { 5192 const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream); 5193 if (stream_status == NULL || stream_status->plane_count != surface_count) 5194 return 
true; 5195 } 5196 if (dc->idle_optimizations_allowed) 5197 return true; 5198 5199 if (dc_can_clear_cursor_limit(dc)) 5200 return true; 5201 5202 return false; 5203 } 5204 5205 static bool full_update_required( 5206 const struct dc *dc, 5207 const struct dc_surface_update *srf_updates, 5208 int surface_count, 5209 const struct dc_stream_update *stream_update, 5210 const struct dc_stream_state *stream) 5211 { 5212 const union dc_plane_cm_flags blend_only_flags = { 5213 .bits = { 5214 .blend_enable = 1, 5215 } 5216 }; 5217 5218 if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) 5219 return true; 5220 5221 for (int i = 0; i < surface_count; i++) { 5222 if (srf_updates && 5223 (srf_updates[i].plane_info || 5224 srf_updates[i].scaling_info || 5225 (srf_updates[i].hdr_mult.value && 5226 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || 5227 (srf_updates[i].sdr_white_level_nits && 5228 srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) || 5229 srf_updates[i].in_transfer_func || 5230 srf_updates[i].surface->force_full_update || 5231 (srf_updates[i].flip_addr && 5232 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || 5233 (srf_updates[i].cm && 5234 ((srf_updates[i].cm->flags.all != blend_only_flags.all && srf_updates[i].cm->flags.all != 0) || 5235 (srf_updates[i].surface->cm.flags.all != blend_only_flags.all && srf_updates[i].surface->cm.flags.all != 0))))) 5236 return true; 5237 } 5238 5239 if (stream_update && 5240 (((stream_update->src.height != 0 && stream_update->src.width != 0) || 5241 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 5242 stream_update->integer_scaling_update) || 5243 stream_update->hdr_static_metadata || 5244 stream_update->abm_level || 5245 stream_update->periodic_interrupt || 5246 stream_update->vrr_infopacket || 5247 stream_update->vsc_infopacket || 5248 stream_update->vsp_infopacket || 5249 
stream_update->hfvsif_infopacket || 5250 stream_update->vtem_infopacket || 5251 stream_update->adaptive_sync_infopacket || 5252 stream_update->avi_infopacket || 5253 stream_update->dpms_off || 5254 stream_update->allow_freesync || 5255 stream_update->vrr_active_variable || 5256 stream_update->vrr_active_fixed || 5257 stream_update->gamut_remap || 5258 stream_update->output_color_space || 5259 stream_update->dither_option || 5260 stream_update->wb_update || 5261 stream_update->dsc_config || 5262 stream_update->mst_bw_update || 5263 stream_update->func_shaper || 5264 stream_update->lut3d_func || 5265 stream_update->pending_test_pattern || 5266 stream_update->crtc_timing_adjust || 5267 stream_update->scaler_sharpener_update || 5268 stream_update->hw_cursor_req)) 5269 return true; 5270 5271 return false; 5272 } 5273 5274 static bool fast_update_only( 5275 const struct dc *dc, 5276 const struct dc_fast_update *fast_update, 5277 const struct dc_surface_update *srf_updates, 5278 int surface_count, 5279 const struct dc_stream_update *stream_update, 5280 const struct dc_stream_state *stream) 5281 { 5282 return fast_updates_exist(fast_update, surface_count) 5283 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); 5284 } 5285 5286 static bool update_planes_and_stream_v2(struct dc *dc, 5287 struct dc_surface_update *srf_updates, int surface_count, 5288 struct dc_stream_state *stream, 5289 struct dc_stream_update *stream_update) 5290 { 5291 struct dc_state *context; 5292 enum surface_update_type update_type; 5293 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 5294 5295 /* In cases where MPO and split or ODM are used transitions can 5296 * cause underflow. Apply stream configuration with minimal pipe 5297 * split first to avoid unsupported transitions for active pipes. 
5298 */ 5299 bool force_minimal_pipe_splitting = 0; 5300 bool is_plane_addition = 0; 5301 bool is_fast_update_only; 5302 5303 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 5304 is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, 5305 surface_count, stream_update, stream); 5306 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( 5307 dc, 5308 stream, 5309 srf_updates, 5310 surface_count, 5311 &is_plane_addition); 5312 5313 /* on plane addition, minimal state is the current one */ 5314 if (force_minimal_pipe_splitting && is_plane_addition && 5315 !commit_minimal_transition_state(dc, dc->current_state)) 5316 return false; 5317 5318 if (!update_planes_and_stream_state( 5319 dc, 5320 srf_updates, 5321 surface_count, 5322 stream, 5323 stream_update, 5324 &update_type, 5325 &context)) 5326 return false; 5327 5328 /* on plane removal, minimal state is the new one */ 5329 if (force_minimal_pipe_splitting && !is_plane_addition) { 5330 if (!commit_minimal_transition_state(dc, context)) { 5331 dc_state_release(context); 5332 return false; 5333 } 5334 update_type = UPDATE_TYPE_FULL; 5335 } 5336 5337 if (dc->hwss.is_pipe_topology_transition_seamless && 5338 !dc->hwss.is_pipe_topology_transition_seamless( 5339 dc, dc->current_state, context)) 5340 commit_minimal_transition_state_in_dc_update(dc, context, stream, 5341 srf_updates, surface_count); 5342 5343 if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) { 5344 commit_planes_for_stream_fast(dc, 5345 srf_updates, 5346 surface_count, 5347 stream, 5348 stream_update, 5349 update_type, 5350 context); 5351 } else { 5352 if (!stream_update && 5353 dc->hwss.is_pipe_topology_transition_seamless && 5354 !dc->hwss.is_pipe_topology_transition_seamless( 5355 dc, dc->current_state, context)) { 5356 DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); 5357 BREAK_TO_DEBUGGER(); 5358 } 5359 
commit_planes_for_stream( 5360 dc, 5361 srf_updates, 5362 surface_count, 5363 stream, 5364 stream_update, 5365 update_type, 5366 context); 5367 } 5368 if (dc->current_state != context) 5369 swap_and_release_current_context(dc, context, stream); 5370 return true; 5371 } 5372 5373 static void commit_planes_and_stream_update_on_current_context(struct dc *dc, 5374 struct dc_surface_update *srf_updates, int surface_count, 5375 struct dc_stream_state *stream, 5376 struct dc_stream_update *stream_update, 5377 enum surface_update_type update_type) 5378 { 5379 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 5380 5381 ASSERT(update_type < UPDATE_TYPE_FULL); 5382 populate_fast_updates(fast_update, srf_updates, surface_count, 5383 stream_update); 5384 if (fast_update_only(dc, fast_update, srf_updates, surface_count, 5385 stream_update, stream) && 5386 !dc->check_config.enable_legacy_fast_update) 5387 commit_planes_for_stream_fast(dc, 5388 srf_updates, 5389 surface_count, 5390 stream, 5391 stream_update, 5392 update_type, 5393 dc->current_state); 5394 else 5395 commit_planes_for_stream( 5396 dc, 5397 srf_updates, 5398 surface_count, 5399 stream, 5400 stream_update, 5401 update_type, 5402 dc->current_state); 5403 } 5404 5405 static void commit_planes_and_stream_update_with_new_context(struct dc *dc, 5406 struct dc_surface_update *srf_updates, int surface_count, 5407 struct dc_stream_state *stream, 5408 struct dc_stream_update *stream_update, 5409 enum surface_update_type update_type, 5410 struct dc_state *new_context) 5411 { 5412 bool skip_new_context = false; 5413 ASSERT(update_type >= UPDATE_TYPE_FULL); 5414 /* 5415 * It is required by the feature design that all pipe topologies 5416 * using extra free pipes for power saving purposes such as 5417 * dynamic ODM or SubVp shall only be enabled when it can be 5418 * transitioned seamlessly to AND from its minimal transition 5419 * state. 
A minimal transition state is defined as the same dc 5420 * state but with all power saving features disabled. So it uses 5421 * the minimum pipe topology. When we can't seamlessly 5422 * transition from state A to state B, we will insert the 5423 * minimal transition state A' or B' in between so seamless 5424 * transition between A and B can be made possible. 5425 * 5426 * To optimize for the time it takes to execute flips, 5427 * the transition from the minimal state to the final state is 5428 * deferred until a steady state (no more transitions) is reached. 5429 */ 5430 if (!dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, new_context)) { 5431 if (!dc->debug.disable_deferred_minimal_transitions) { 5432 dc->check_config.deferred_transition_state = true; 5433 dc->check_config.transition_countdown_to_steady_state = 5434 dc->debug.num_fast_flips_to_steady_state_override ? 5435 dc->debug.num_fast_flips_to_steady_state_override : 5436 NUM_FAST_FLIPS_TO_STEADY_STATE; 5437 5438 if (commit_minimal_transition_based_on_new_context(dc, new_context, stream, stream_update, 5439 srf_updates, surface_count)) { 5440 skip_new_context = true; 5441 dc_state_release(new_context); 5442 new_context = dc->current_state; 5443 } else { 5444 /* 5445 * In this case a new mpo plane is being enabled on pipes that were 5446 * previously in use, and the surface update to the existing plane 5447 * includes an alpha box where the new plane will be, so the update 5448 * from minimal to final cannot be deferred as the alpha box would 5449 * be visible to the user 5450 */ 5451 commit_minimal_transition_based_on_current_context(dc, new_context, stream); 5452 } 5453 } else { 5454 commit_minimal_transition_state_in_dc_update(dc, new_context, stream, 5455 srf_updates, surface_count); 5456 } 5457 } else if (dc->check_config.deferred_transition_state) { 5458 /* reset countdown as steady state not reached */ 5459 dc->check_config.transition_countdown_to_steady_state = 5460 
dc->debug.num_fast_flips_to_steady_state_override ? 5461 dc->debug.num_fast_flips_to_steady_state_override : 5462 NUM_FAST_FLIPS_TO_STEADY_STATE; 5463 } 5464 5465 if (!skip_new_context) { 5466 commit_planes_for_stream(dc, srf_updates, surface_count, stream, stream_update, update_type, new_context); 5467 swap_and_release_current_context(dc, new_context, stream); 5468 } 5469 } 5470 5471 static bool update_planes_and_stream_v3(struct dc *dc, 5472 struct dc_surface_update *srf_updates, int surface_count, 5473 struct dc_stream_state *stream, 5474 struct dc_stream_update *stream_update) 5475 { 5476 struct dc_state *new_context; 5477 enum surface_update_type update_type; 5478 5479 /* 5480 * When this function returns true and new_context is not equal to 5481 * current state, the function allocates and validates a new dc state 5482 * and assigns it to new_context. The function expects that the caller 5483 * is responsible to free this memory when new_context is no longer 5484 * used. We swap current with new context and free current instead. So 5485 * new_context's memory will live until the next full update after it is 5486 * replaced by a newer context. Refer to the use of 5487 * swap_and_free_current_context below. 
5488 */ 5489 if (!update_planes_and_stream_state(dc, srf_updates, surface_count, 5490 stream, stream_update, &update_type, 5491 &new_context)) 5492 return false; 5493 5494 if (new_context == dc->current_state) { 5495 commit_planes_and_stream_update_on_current_context(dc, 5496 srf_updates, surface_count, stream, 5497 stream_update, update_type); 5498 5499 if (dc->check_config.transition_countdown_to_steady_state) 5500 dc->check_config.transition_countdown_to_steady_state--; 5501 } else { 5502 commit_planes_and_stream_update_with_new_context(dc, 5503 srf_updates, surface_count, stream, 5504 stream_update, update_type, new_context); 5505 } 5506 5507 return true; 5508 } 5509 5510 static void clear_update_flags(struct dc_surface_update *srf_updates, 5511 int surface_count, struct dc_stream_state *stream) 5512 { 5513 int i; 5514 5515 if (stream) 5516 stream->update_flags.raw = 0; 5517 5518 for (i = 0; i < surface_count; i++) 5519 if (srf_updates[i].surface) 5520 srf_updates[i].surface->update_flags.raw = 0; 5521 } 5522 5523 bool dc_update_planes_and_stream(struct dc *dc, 5524 struct dc_surface_update *srf_updates, int surface_count, 5525 struct dc_stream_state *stream, 5526 struct dc_stream_update *stream_update) 5527 { 5528 struct dc_update_scratch_space *scratch = dc_update_planes_and_stream_init( 5529 dc, 5530 srf_updates, 5531 surface_count, 5532 stream, 5533 stream_update 5534 ); 5535 bool more = true; 5536 5537 while (more) { 5538 if (!dc_update_planes_and_stream_prepare(scratch)) 5539 return false; 5540 5541 dc_update_planes_and_stream_execute(scratch); 5542 more = dc_update_planes_and_stream_cleanup(scratch); 5543 } 5544 return true; 5545 } 5546 5547 void dc_commit_updates_for_stream(struct dc *dc, 5548 struct dc_surface_update *srf_updates, 5549 int surface_count, 5550 struct dc_stream_state *stream, 5551 struct dc_stream_update *stream_update, 5552 struct dc_state *state) 5553 { 5554 (void)state; 5555 bool ret = false; 5556 5557 dc_exit_ips_for_hw_access(dc); 
5558 /* TODO: Since change commit sequence can have a huge impact, 5559 * we decided to only enable it for DCN3x. However, as soon as 5560 * we get more confident about this change we'll need to enable 5561 * the new sequence for all ASICs. 5562 */ 5563 if (dc->ctx->dce_version >= DCN_VERSION_4_01) { 5564 ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, 5565 stream, stream_update); 5566 } else { 5567 ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, 5568 stream, stream_update); 5569 } 5570 5571 if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2) 5572 clear_update_flags(srf_updates, surface_count, stream); 5573 } 5574 5575 uint8_t dc_get_current_stream_count(struct dc *dc) 5576 { 5577 return dc->current_state->stream_count; 5578 } 5579 5580 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) 5581 { 5582 if (i < dc->current_state->stream_count) 5583 return dc->current_state->streams[i]; 5584 return NULL; 5585 } 5586 5587 enum dc_irq_source dc_interrupt_to_irq_source( 5588 struct dc *dc, 5589 uint32_t src_id, 5590 uint32_t ext_id) 5591 { 5592 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 5593 } 5594 5595 /* 5596 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source 5597 */ 5598 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 5599 { 5600 5601 if (dc == NULL) 5602 return false; 5603 5604 return dal_irq_service_set(dc->res_pool->irqs, src, enable); 5605 } 5606 5607 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 5608 { 5609 dal_irq_service_ack(dc->res_pool->irqs, src); 5610 } 5611 5612 void dc_power_down_on_boot(struct dc *dc) 5613 { 5614 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 5615 dc->hwss.power_down_on_boot) { 5616 if (dc->current_state->stream_count > 0) 5617 return; 5618 5619 if (dc->caps.ips_support) 5620 dc_exit_ips_for_hw_access(dc); 5621 dc->hwss.power_down_on_boot(dc); 5622 } 5623 } 5624 5625 void 
dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) 5626 { 5627 if (!dc->current_state) 5628 return; 5629 5630 dc_exit_ips_for_hw_access(dc); 5631 5632 switch (power_state) { 5633 case DC_ACPI_CM_POWER_STATE_D0: 5634 dc_state_construct(dc, dc->current_state); 5635 5636 dc_z10_restore(dc); 5637 5638 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); 5639 5640 dc->hwss.init_hw(dc); 5641 5642 if (dc->hwss.init_sys_ctx != NULL && 5643 dc->vm_pa_config.valid) { 5644 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); 5645 } 5646 break; 5647 case DC_ACPI_CM_POWER_STATE_D3: 5648 if (dc->caps.ips_support) 5649 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); 5650 5651 if (dc->caps.ips_v2_support) { 5652 if (dc->clk_mgr->funcs->set_low_power_state) 5653 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); 5654 } 5655 break; 5656 default: 5657 ASSERT(dc->current_state->stream_count == 0); 5658 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); 5659 5660 dc_state_destruct(dc->current_state); 5661 5662 break; 5663 } 5664 } 5665 5666 void dc_resume(struct dc *dc) 5667 { 5668 uint32_t i; 5669 5670 for (i = 0; i < dc->link_count; i++) 5671 dc->link_srv->resume(dc->links[i]); 5672 } 5673 5674 bool dc_is_dmcu_initialized(struct dc *dc) 5675 { 5676 struct dmcu *dmcu = dc->res_pool->dmcu; 5677 5678 if (dmcu) 5679 return dmcu->funcs->is_dmcu_initialized(dmcu); 5680 return false; 5681 } 5682 5683 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) 5684 { 5685 if (dc->hwss.set_clock) 5686 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); 5687 return DC_ERROR_UNEXPECTED; 5688 } 5689 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) 5690 { 5691 if (dc->hwss.get_clock) 5692 dc->hwss.get_clock(dc, clock_type, clock_cfg); 5693 } 5694 5695 /* enable/disable eDP PSR without 
specify stream for eDP */ 5696 bool dc_set_psr_allow_active(struct dc *dc, bool enable) 5697 { 5698 int i; 5699 bool allow_active; 5700 5701 for (i = 0; i < dc->current_state->stream_count ; i++) { 5702 struct dc_link *link; 5703 struct dc_stream_state *stream = dc->current_state->streams[i]; 5704 5705 link = stream->link; 5706 if (!link) 5707 continue; 5708 5709 if (link->psr_settings.psr_feature_enabled) { 5710 if (enable && !link->psr_settings.psr_allow_active) { 5711 allow_active = true; 5712 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) 5713 return false; 5714 } else if (!enable && link->psr_settings.psr_allow_active) { 5715 allow_active = false; 5716 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) 5717 return false; 5718 } 5719 } 5720 } 5721 5722 return true; 5723 } 5724 5725 /* enable/disable eDP Replay without specify stream for eDP */ 5726 bool dc_set_replay_allow_active(struct dc *dc, bool active) 5727 { 5728 int i; 5729 bool allow_active; 5730 5731 for (i = 0; i < dc->current_state->stream_count; i++) { 5732 struct dc_link *link; 5733 struct dc_stream_state *stream = dc->current_state->streams[i]; 5734 5735 link = stream->link; 5736 if (!link) 5737 continue; 5738 5739 if (link->replay_settings.replay_feature_enabled) { 5740 if (active && !link->replay_settings.replay_allow_active) { 5741 allow_active = true; 5742 if (!dc_link_set_replay_allow_active(link, &allow_active, 5743 false, false, NULL)) 5744 return false; 5745 } else if (!active && link->replay_settings.replay_allow_active) { 5746 allow_active = false; 5747 if (!dc_link_set_replay_allow_active(link, &allow_active, 5748 true, false, NULL)) 5749 return false; 5750 } 5751 } 5752 } 5753 5754 return true; 5755 } 5756 5757 /* set IPS disable state */ 5758 bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips) 5759 { 5760 dc_exit_ips_for_hw_access(dc); 5761 5762 dc->config.disable_ips = disable_ips; 5763 5764 return true; 5765 } 5766 
/*
 * Allow or disallow idle power optimizations (e.g. IPS). caller_name is
 * logged so state transitions can be attributed during debug.
 */
void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
	int idle_fclk_khz = 0, idle_dramclk_khz = 0, i = 0;
	enum mall_stream_type subvp_pipe_type[MAX_PIPES] = {0};
	struct pipe_ctx *pipe = NULL;
	struct dc_state *context = dc->current_state;

	if (dc->debug.disable_idle_power_optimizations) {
		DC_LOG_DEBUG("%s: disabled\n", __func__);
		return;
	}

	/* Log every requested transition, even ones filtered out below. */
	if (allow != dc->idle_optimizations_allowed)
		DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
			   dc->idle_optimizations_allowed, allow, caller_name);

	if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
		return;

	/* Idle optimizations require SMU to be reachable. */
	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
			return;

	/* No-op if already in the requested state. */
	if (allow == dc->idle_optimizations_allowed)
		return;

	if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL &&
		dc->hwss.apply_idle_power_optimizations(dc, allow)) {
		dc->idle_optimizations_allowed = allow;
		DC_LOG_DEBUG("%s: %s\n", __func__, allow ? "enabled" : "disabled");
	}

	// log idle clocks and sub vp pipe types at idle optimization time
	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_fclk)
		idle_fclk_khz = dc->clk_mgr->funcs->get_hard_min_fclk(dc->clk_mgr);

	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk)
		idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr);

	if (dc->res_pool && context) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
		}
	}

	if (!dc->caps.is_apu)
		DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
			__func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
			subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);

}

/* Force-exit IPS (if supported) so the caller may safely touch registers. */
void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
{
	if (dc->caps.ips_support)
		dc_allow_idle_optimizations_internal(dc, false, caller_name);
}

/* Return true if DMUB is currently allowed to be in an IPS idle state. */
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
	if (dc->debug.disable_idle_power_optimizations)
		return false;

	if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
		return false;

	if (!dc->ctx->dmub_srv)
		return false;

	return dc->ctx->dmub_srv->idle_allowed;
}

/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

/* set min memory clock to the min required for current mode, max to maxDPM */
void dc_lock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
		dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

/*
 * Blank all active pipes, force memclk to memclk_mhz, then unblank.
 * Used when a memclk change is not p-state safe while pixel data flows.
 * NOTE(review): the 'apply' parameter is explicitly discarded below;
 * both call sites pass true — presumably kept for interface stability.
 */
static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
{
	(void)apply;
	struct dc_state *context = dc->current_state;
	struct hubp *hubp;
	struct pipe_ctx *pipe;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, true);

			// wait for double buffer
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, true);
		}
	}
	/* With pipes blanked, clamp both max and min memclk to the target. */
	if (dc->clk_mgr->funcs->set_max_memclk)
		dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
	if (dc->clk_mgr->funcs->set_min_memclk)
		dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);

	/* Restore pixel data on all previously blanked pipes. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, false);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, false);
		}
	}
}


/**
 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
 * @dc: pointer to dc of the dm calling this
 * @enable: True = transition to DC mode, false = transition back to AC mode
 *
 * Some SoCs define additional clock limits when in DC mode, DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply/unapply the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through DM interface, or manually by escape calls.
 * Need to hold a dc lock when doing so.
 *
 * Return: none (void function)
 *
 */
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
	unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
	bool p_state_change_support;

	if (!dc->config.dc_mode_clk_limit_support)
		return;

	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
	/* Highest memclk across all DPM table entries. */
	for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
		if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
			maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
	}
	/* Current functional minimum, converted kHz -> MHz rounding up. */
	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;

	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
		/* Entering DC mode: cap memclk at the DC-mode soft max. */
		if (p_state_change_support) {
			if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
			// else: No-Op
		} else {
			/* No p-state support: must blank pipes to switch. */
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, softMax);
			// else: No-Op
		}
	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
		/* Back to AC mode: restore the full DPM maximum. */
		if (p_state_change_support) {
			if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, maxDPM);
			// else: No-Op
		}
	}
	dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
bool
dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
		unsigned int pitch,
		unsigned int height,
		enum surface_pixel_format format,
		struct dc_cursor_attributes *cursor_attr)
{
	/* Eligible only if the plane fits in MALL per the HW sequencer. */
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
		return true;
	return false;
}

/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
	dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);

	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}

/* Flag FW-based mclk switching (vblank stretch) as shut down in bw state. */
void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
{
	if (dc->current_state)
		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}

/**
 * dc_is_dmub_outbox_supported - Check if DMUB firmware support outbox notification
 *
 * @dc: [in] dc structure
 *
 * Checks whether DMUB FW supports outbox notifications, if supported DM
 * should register outbox interrupt prior to actually enabling interrupts
 * via dc_enable_dmub_outbox
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
	if (!dc->caps.dmcub_support)
		return false;

	switch (dc->ctx->asic_id.chip_family) {

	case FAMILY_YELLOW_CARP:
		/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
		if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
				!dc->debug.dpia_debug.bits.disable_dpia)
			return true;
		break;

	case AMDGPU_FAMILY_GC_11_0_1:
	case AMDGPU_FAMILY_GC_11_5_0:
	case AMDGPU_FAMILY_GC_11_5_4:
		if (!dc->debug.dpia_debug.bits.disable_dpia)
			return true;
		break;

	default:
		break;
	}

	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;

}

/**
 * dc_enable_dmub_notifications - Check if dmub fw supports outbox
 *
 * @dc: [in] dc structure
 *
 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
 * API shall be removed after switching.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
	return dc_is_dmub_outbox_supported(dc);
}

/**
 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
 *
 * @dc: [in] dc structure
 *
 * Enables DMUB unsolicited notifications to x86 via outbox.
 */
void dc_enable_dmub_outbox(struct dc *dc)
{
	struct dc_context *dc_ctx = dc->ctx;

	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}

/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 * Sets port index appropriately for legacy DDC
 * @dc: dc structure
 * @link_index: link index
 * @payload: aux payload
 *
 * Returns: True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
		uint32_t link_index,
		struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};

	/* DMUB aux data buffer holds at most 16 bytes per transfer. */
	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	/* For dpia, ddc_pin is set to NULL */
	if (!dc->links[link_index]->ddc->ddc_pin)
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
	else
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;

	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

	/* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	/* Copy write data into the command buffer. */
	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
			payload->data,
			payload->length
			);
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

/*
 * Enable/disable smart power for an OLED eDP panel via DMUB.
 * Returns true when the DMUB command executed successfully.
 */
bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits,
	uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline)
{
	bool status = false;
	struct dc *dc = link->ctx->dc;
	union dmub_rb_cmd cmd;
	uint8_t otg_inst = 0;
	unsigned int panel_inst = 0;
	struct pipe_ctx *pipe_ctx = NULL;
	struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
	int i = 0;

	// get panel_inst
	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return status;

	// get otg_inst
	for (i = 0; i < MAX_PIPES; i++) {
		if (res_ctx &&
			res_ctx->pipe_ctx[i].stream &&
			res_ctx->pipe_ctx[i].stream->link &&
			res_ctx->pipe_ctx[i].stream->link == link &&
			res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
			pipe_ctx = &res_ctx->pipe_ctx[i];
			//TODO: refactor for multi edp support
			break;
		}
	}

	if (pipe_ctx)
		otg_inst = pipe_ctx->stream_res.tg->inst;

	// before enable smart power OLED, we need to call set pipe for DMUB to set ABM config
	if (enable) {
		if (dc->hwss.set_pipe && pipe_ctx)
			dc->hwss.set_pipe(pipe_ctx);
	}

	// fill in cmd
	memset(&cmd, 0, sizeof(cmd));

	cmd.smart_power_oled_enable.header.type = DMUB_CMD__SMART_POWER_OLED;
	cmd.smart_power_oled_enable.header.sub_type = DMUB_CMD__SMART_POWER_OLED_ENABLE;
	cmd.smart_power_oled_enable.header.payload_bytes =
		sizeof(struct dmub_rb_cmd_smart_power_oled_enable_data) - sizeof(struct dmub_cmd_header);
	cmd.smart_power_oled_enable.header.ret_status = 1;
	cmd.smart_power_oled_enable.data.enable = enable;
	cmd.smart_power_oled_enable.data.panel_inst = panel_inst;
	cmd.smart_power_oled_enable.data.peak_nits = peak_nits;
	cmd.smart_power_oled_enable.data.otg_inst = otg_inst;
	cmd.smart_power_oled_enable.data.digfe_inst = link->link_enc->preferred_engine;
	cmd.smart_power_oled_enable.data.digbe_inst = link->link_enc->transmitter;

	cmd.smart_power_oled_enable.data.debugcontrol = debug_control;
	cmd.smart_power_oled_enable.data.triggerline = triggerline;
	cmd.smart_power_oled_enable.data.fixed_max_cll = fixed_CLL;

	// send cmd
	status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return status;
}

/*
 * Query the current max CLL for a smart-power OLED panel from DMUB.
 * On failure *pCurrent_MaxCLL is set to 0.
 */
bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL)
{
	struct dc *dc = link->ctx->dc;
	union dmub_rb_cmd cmd;
	bool status = false;
	unsigned int panel_inst = 0;

	// get panel_inst
	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return status;

	// fill in cmd
	memset(&cmd, 0, sizeof(cmd));

	cmd.smart_power_oled_getmaxcll.header.type = DMUB_CMD__SMART_POWER_OLED;
	cmd.smart_power_oled_getmaxcll.header.sub_type = DMUB_CMD__SMART_POWER_OLED_GETMAXCLL;
	cmd.smart_power_oled_getmaxcll.header.payload_bytes = sizeof(cmd.smart_power_oled_getmaxcll.data);
	cmd.smart_power_oled_getmaxcll.header.ret_status = 1;

	cmd.smart_power_oled_getmaxcll.data.input.panel_inst = panel_inst;

	// send cmd and wait for reply
	status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);

	if (status)
		*pCurrent_MaxCLL = cmd.smart_power_oled_getmaxcll.data.output.current_max_cll;
	else
		*pCurrent_MaxCLL = 0;

	return status;
}

/* Map a DPIA port index back to a link index; returns 0xFF if not found. */
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
		uint8_t dpia_port_index)
{
	uint8_t index, link_index = 0xFF;

	for (index = 0; index < dc->link_count; index++) {
		/* ddc_hw_inst has dpia port index for dpia links
		 * and ddc instance for legacy links
		 */
		if (!dc->links[index]->ddc->ddc_pin) {
			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
				link_index = index;
				break;
			}
		}
	}
	ASSERT(link_index != 0xFF);
	return link_index;
}

/**
 * dc_process_dmub_set_config_async - Submits set_config command
 *
 * @dc: [in] dc structure
 * @link_index: [in] link_index: link index
 * @payload: [in] aux payload
 * @notify: [out] set_config immediate reply
 *
 * Submits set_config command to dmub via inbox message.
6249 * 6250 * Return: 6251 * True if successful, False if failure 6252 */ 6253 bool dc_process_dmub_set_config_async(struct dc *dc, 6254 uint32_t link_index, 6255 struct set_config_cmd_payload *payload, 6256 struct dmub_notification *notify) 6257 { 6258 union dmub_rb_cmd cmd = {0}; 6259 bool is_cmd_complete = true; 6260 6261 /* prepare SET_CONFIG command */ 6262 cmd.set_config_access.header.type = DMUB_CMD__DPIA; 6263 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; 6264 6265 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; 6266 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 6267 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; 6268 6269 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { 6270 /* command is not processed by dmub */ 6271 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; 6272 return is_cmd_complete; 6273 } 6274 6275 /* command processed by dmub, if ret_status is 1, it is completed instantly */ 6276 if (cmd.set_config_access.header.ret_status == 1) 6277 notify->sc_status = cmd.set_config_access.set_config_control.immed_status; 6278 else 6279 /* cmd pending, will receive notification via outbox */ 6280 is_cmd_complete = false; 6281 6282 return is_cmd_complete; 6283 } 6284 6285 /** 6286 * dc_process_dmub_set_mst_slots - Submits MST solt allocation 6287 * 6288 * @dc: [in] dc structure 6289 * @link_index: [in] link index 6290 * @mst_alloc_slots: [in] mst slots to be allotted 6291 * @mst_slots_in_use: [out] mst slots in use returned in failure case 6292 * 6293 * Submits mst slot allocation command to dmub via inbox message 6294 * 6295 * Return: 6296 * DC_OK if successful, DC_ERROR if failure 6297 */ 6298 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, 6299 uint32_t link_index, 6300 uint8_t mst_alloc_slots, 6301 uint8_t *mst_slots_in_use) 6302 { 6303 union dmub_rb_cmd cmd = {0}; 6304 
6305 /* prepare MST_ALLOC_SLOTS command */ 6306 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; 6307 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; 6308 6309 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; 6310 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; 6311 6312 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) 6313 /* command is not processed by dmub */ 6314 return DC_ERROR_UNEXPECTED; 6315 6316 /* command processed by dmub, if ret_status is 1 */ 6317 if (cmd.set_config_access.header.ret_status != 1) 6318 /* command processing error */ 6319 return DC_ERROR_UNEXPECTED; 6320 6321 /* command processed and we have a status of 2, mst not enabled in dpia */ 6322 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) 6323 return DC_FAIL_UNSUPPORTED_1; 6324 6325 /* previously configured mst alloc and used slots did not match */ 6326 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { 6327 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; 6328 return DC_NOT_SUPPORTED; 6329 } 6330 6331 return DC_OK; 6332 } 6333 6334 /** 6335 * dc_process_dmub_dpia_set_tps_notification - Submits tps notification 6336 * 6337 * @dc: [in] dc structure 6338 * @link_index: [in] link index 6339 * @tps: [in] request tps 6340 * 6341 * Submits set_tps_notification command to dmub via inbox message 6342 */ 6343 void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps) 6344 { 6345 union dmub_rb_cmd cmd = {0}; 6346 6347 cmd.set_tps_notification.header.type = DMUB_CMD__DPIA; 6348 cmd.set_tps_notification.header.sub_type = DMUB_CMD__DPIA_SET_TPS_NOTIFICATION; 6349 cmd.set_tps_notification.tps_notification.instance = dc->links[link_index]->ddc_hw_inst; 6350 cmd.set_tps_notification.tps_notification.tps = tps; 6351 6352 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, 
DM_DMUB_WAIT_TYPE_WAIT); 6353 } 6354 6355 /** 6356 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption 6357 * 6358 * @dc: [in] dc structure 6359 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable 6360 * 6361 * Submits dpia hpd int enable command to dmub via inbox message 6362 */ 6363 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, 6364 uint32_t hpd_int_enable) 6365 { 6366 union dmub_rb_cmd cmd = {0}; 6367 6368 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; 6369 cmd.dpia_hpd_int_enable.enable = hpd_int_enable; 6370 6371 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 6372 6373 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); 6374 } 6375 6376 /** 6377 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging 6378 * 6379 * @dc: [in] dc structure 6380 * 6381 * 6382 */ 6383 void dc_print_dmub_diagnostic_data(const struct dc *dc) 6384 { 6385 dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv); 6386 } 6387 6388 /** 6389 * dc_disable_accelerated_mode - disable accelerated mode 6390 * @dc: dc structure 6391 */ 6392 void dc_disable_accelerated_mode(struct dc *dc) 6393 { 6394 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); 6395 } 6396 6397 6398 /** 6399 * dc_notify_vsync_int_state - notifies vsync enable/disable state 6400 * @dc: dc structure 6401 * @stream: stream where vsync int state changed 6402 * @enable: whether vsync is enabled or disabled 6403 * 6404 * Called when vsync is enabled/disabled Will notify DMUB to start/stop ABM 6405 * interrupts after steady state is reached. 
 */
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
	unsigned int i, edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];


	/* PSR/Replay manage ABM pausing themselves; nothing to do here. */
	if (link->psr_settings.psr_feature_enabled)
		return;

	if (link->replay_settings.replay_feature_enabled)
		return;

	/*find primary pipe associated with stream*/
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return;
	}

	dc_get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++) {
		if (edp_links[i] == link)
			break;
	}

	/* Not an eDP link we track: nothing to notify. */
	if (i == edp_num) {
		return;
	}

	/* i is the panel instance here; pause ABM while vsync is disabled. */
	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}

/*****************************************************************************
 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
 * ABM
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @pData: abm hw states
 *
 ****************************************************************************/
bool dc_abm_save_restore(
		struct dc *dc,
		struct dc_stream_state *stream,
		struct abm_save_restore *pData)
{
	unsigned int i, edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	/* Replay manages ABM itself; do not save/restore underneath it. */
	if (link->replay_settings.replay_feature_enabled)
		return false;

	/*find primary pipe associated with stream*/
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return false;
	}

	dc_get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++)
		if (edp_links[i] == link)
			break;

	if (i == edp_num)
		return false;

	/* i is the panel instance for the save/restore call. */
	if (pipe->stream_res.abm &&
		pipe->stream_res.abm->funcs->save_restore)
		return pipe->stream_res.abm->funcs->save_restore(
				pipe->stream_res.abm,
				i,
				pData);
	return false;
}

/*
 * Report current DC properties to the DM; currently the effective cursor
 * size limit, which is the minimum over all streams' HW cursor limits when
 * SW-cursor fallback is allowed.
 */
void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
	unsigned int i;
	unsigned int max_cursor_size = dc->caps.max_cursor_size;
	unsigned int stream_cursor_size;

	if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
		for (i = 0; i < dc->current_state->stream_count; i++) {
			stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc,
					dc->current_state,
					dc->current_state->streams[i]);

			if (stream_cursor_size < max_cursor_size) {
				max_cursor_size = stream_cursor_size;
			}
		}
	}

	properties->cursor_size_limit = max_cursor_size;
}

/**
 * dc_set_edp_power() - DM controls eDP power to be ON/OFF
 *
 * Called when DM wants to power on/off eDP.
 * Only works on links with the skip_implict_edp_power_control flag set.
 *
 * @dc: Current DC state
 * @edp_link: a link with eDP connector signal type
 * @powerOn: power on/off eDP
 *
 * Return: void
 */
void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
				 bool powerOn)
{
	(void)dc;
	if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (edp_link->skip_implict_edp_power_control == false)
		return;

	edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
}

/**
 * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
 *
 * Called when DM wants to make power policy decisions based on dc_state
 *
 * @context: Pointer to the dc_state from which the power profile is extracted.
 *
 * Return: The power profile structure containing the power level information.
 */
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
{
	struct dc_power_profile profile = { 0 };

	/* Fallback: no p-state support implies a higher power level. */
	profile.power_level = !context->bw_ctx.bw.dcn.clk.p_state_change_support;
	if (!context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc)
		return profile;
	struct dc *dc = context->clk_mgr->ctx->dc;

	/* Prefer the resource-pool-specific power profile when available. */
	if (dc->res_pool->funcs->get_power_profile)
		profile.power_level = dc->res_pool->funcs->get_power_profile(context);
	return profile;
}

/**
 * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
 *
 * This function is called to log the detile buffer size from the dc_state.
 *
 * @context: a pointer to the dc_state from which the detile buffer size is extracted.
 *
 * Return: the size of the detile buffer, or 0 if not available.
 */
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
{
	struct dc *dc = context->clk_mgr->ctx->dc;

	if (dc->res_pool->funcs->get_det_buffer_size)
		return dc->res_pool->funcs->get_det_buffer_size(context);
	else
		return 0;
}

/**
 * dc_get_host_router_index: Get index of host router from a dpia link
 *
 * This function returns the host router index of the target link, if the
 * target link is a dpia link.
 *
 * @link: Pointer to the target link (input)
 * @host_router_index: Pointer to store the host router index of the target link (output).
 *
 * Return: true if the host router index is found and valid.
 *
 */
bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index)
{
	struct dc *dc;

	if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
		return false;

	dc = link->ctx->dc;

	if (link->link_index < dc->lowest_dpia_link_index)
		return false;

	/* DPIA links are grouped per host router, in link-index order. */
	*host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router;
	if (*host_router_index < dc->caps.num_of_host_routers)
		return true;
	else
		return false;
}

/* True if any current stream has a cursor limit update pending. */
bool dc_is_cursor_limit_pending(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i]))
			return true;
	}

	return false;
}

/* True if any current stream may clear its SubVP cursor limit. */
bool dc_can_clear_cursor_limit(const struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state))
			return true;
	}

	return false;
}

/* Collect underflow debug data for the OTG with the given instance. */
void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
		struct dc_underflow_debug_data *out_data)
{
	struct timing_generator *tg = NULL;

	/* Resolve the timing generator whose instance matches the requested OTG. */
	for (int i = 0; i < MAX_PIPES; i++) {
		if (dc->res_pool->timing_generators[i] &&
			dc->res_pool->timing_generators[i]->inst == primary_otg_inst) {
			tg = dc->res_pool->timing_generators[i];
			break;
		}
	}

	/* Leave IPS before touching HW; tg may be NULL if no instance matched. */
	dc_exit_ips_for_hw_access(dc);
	if (dc->hwss.get_underflow_debug_data)
		dc->hwss.get_underflow_debug_data(dc, tg, out_data);
}

/* Report UCLK P-state and FAMS status from the current state's clock info. */
void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst,
		struct power_features *out_data)
{
	(void)primary_otg_inst; /* unused; status is global, not per-OTG */
	out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support;
	out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
}

/**
 * dc_capture_register_software_state() - snapshot the software-side register
 * programming state of the current dc_state
 *
 * Walks the current state's pipe contexts and copies the values DC software
 * uses to program HUBP, HUBBUB, DPP, DCCG, DSC, MPC, OPP and OPTC registers
 * into @state. No hardware is read; several fields are filled with fixed
 * "typical" defaults rather than actual programmed values (marked inline).
 *
 * @dc: DC instance whose current_state is captured
 * @state: output structure; zeroed first, state_valid set on success
 *
 * Return: true on success, false if @dc, its current_state, or @state is NULL
 * (state->state_valid is cleared when @state is non-NULL).
 */
bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state)
{
	struct dc_state *context;
	struct resource_context *res_ctx;
	int i;

	if (!dc || !dc->current_state || !state) {
		if (state)
			state->state_valid = false;
		return false;
	}

	/* Initialize the state structure */
	memset(state, 0, sizeof(struct dc_register_software_state));

	context = dc->current_state;
	res_ctx = &context->res_ctx;

	/* Count active pipes and streams */
	state->active_pipe_count = 0;
	state->active_stream_count = context->stream_count;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (res_ctx->pipe_ctx[i].stream)
			state->active_pipe_count++;
	}

	/* Capture HUBP programming state for each pipe */
	for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		state->hubp[i].valid_stream = false;
		if (!pipe_ctx->stream)
			continue;

		state->hubp[i].valid_stream = true;

		/* HUBP register programming variables */
		if (pipe_ctx->stream_res.tg)
			state->hubp[i].vtg_sel = pipe_ctx->stream_res.tg->inst;

		state->hubp[i].hubp_clock_enable = (pipe_ctx->plane_res.hubp != NULL) ? 1 : 0;

		state->hubp[i].valid_plane_state = false;
		if (pipe_ctx->plane_state) {
			state->hubp[i].valid_plane_state = true;
			state->hubp[i].surface_pixel_format = pipe_ctx->plane_state->format;
			state->hubp[i].rotation_angle = pipe_ctx->plane_state->rotation;
			state->hubp[i].h_mirror_en = pipe_ctx->plane_state->horizontal_mirror ? 1 : 0;

			/* Surface size */
			if (pipe_ctx->plane_state->plane_size.surface_size.width > 0) {
				state->hubp[i].surface_size_width = pipe_ctx->plane_state->plane_size.surface_size.width;
				state->hubp[i].surface_size_height = pipe_ctx->plane_state->plane_size.surface_size.height;
			}

			/*
			 * Viewport dimensions.
			 * NOTE(review): these are read from plane_state->src_rect,
			 * not from plane_res.scl_data viewport — confirm this is the
			 * intended source for the "viewport" fields.
			 */
			if (pipe_ctx->plane_state->src_rect.width > 0) {
				state->hubp[i].pri_viewport_width = pipe_ctx->plane_state->src_rect.width;
				state->hubp[i].pri_viewport_height = pipe_ctx->plane_state->src_rect.height;
				state->hubp[i].pri_viewport_x_start = pipe_ctx->plane_state->src_rect.x;
				state->hubp[i].pri_viewport_y_start = pipe_ctx->plane_state->src_rect.y;
			}

			/* DCC settings */
			state->hubp[i].surface_dcc_en = (pipe_ctx->plane_state->dcc.enable) ? 1 : 0;
			state->hubp[i].surface_dcc_ind_64b_blk = pipe_ctx->plane_state->dcc.independent_64b_blks;
			state->hubp[i].surface_dcc_ind_128b_blk = pipe_ctx->plane_state->dcc.dcc_ind_blk;

			/* Surface pitch */
			state->hubp[i].surface_pitch = pipe_ctx->plane_state->plane_size.surface_pitch;
			state->hubp[i].meta_pitch = pipe_ctx->plane_state->dcc.meta_pitch;
			state->hubp[i].chroma_pitch = pipe_ctx->plane_state->plane_size.chroma_pitch;
			state->hubp[i].meta_pitch_c = pipe_ctx->plane_state->dcc.meta_pitch_c;

			/* Surface addresses - primary */
			state->hubp[i].primary_surface_address_low = pipe_ctx->plane_state->address.grph.addr.low_part;
			state->hubp[i].primary_surface_address_high = pipe_ctx->plane_state->address.grph.addr.high_part;
			state->hubp[i].primary_meta_surface_address_low = pipe_ctx->plane_state->address.grph.meta_addr.low_part;
			state->hubp[i].primary_meta_surface_address_high = pipe_ctx->plane_state->address.grph.meta_addr.high_part;

			/*
			 * TMZ settings.
			 * NOTE(review): both surface and meta TMZ mirror the single
			 * address.tmz_surface flag — verify a separate meta TMZ source
			 * is not intended.
			 */
			state->hubp[i].primary_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
			state->hubp[i].primary_meta_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;

			/* Tiling configuration (GFX9+ layout only) */
			state->hubp[i].min_dc_gfx_version9 = false;
			if (pipe_ctx->plane_state->tiling_info.gfxversion >= DcGfxVersion9) {
				state->hubp[i].min_dc_gfx_version9 = true;
				state->hubp[i].sw_mode = pipe_ctx->plane_state->tiling_info.gfx9.swizzle;
				state->hubp[i].num_pipes = pipe_ctx->plane_state->tiling_info.gfx9.num_pipes;
				state->hubp[i].num_banks = pipe_ctx->plane_state->tiling_info.gfx9.num_banks;
				state->hubp[i].pipe_interleave = pipe_ctx->plane_state->tiling_info.gfx9.pipe_interleave;
				state->hubp[i].num_shader_engines = pipe_ctx->plane_state->tiling_info.gfx9.num_shader_engines;
				state->hubp[i].num_rb_per_se = pipe_ctx->plane_state->tiling_info.gfx9.num_rb_per_se;
				state->hubp[i].num_pkrs = pipe_ctx->plane_state->tiling_info.gfx9.num_pkrs;
			}
		}

		/* DML Request Size Configuration (luma) */
		if (pipe_ctx->rq_regs.rq_regs_l.chunk_size > 0) {
			state->hubp[i].rq_chunk_size = pipe_ctx->rq_regs.rq_regs_l.chunk_size;
			state->hubp[i].rq_min_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_chunk_size;
			state->hubp[i].rq_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size;
			state->hubp[i].rq_min_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size;
			state->hubp[i].rq_dpte_group_size = pipe_ctx->rq_regs.rq_regs_l.dpte_group_size;
			state->hubp[i].rq_mpte_group_size = pipe_ctx->rq_regs.rq_regs_l.mpte_group_size;
			state->hubp[i].rq_swath_height_l = pipe_ctx->rq_regs.rq_regs_l.swath_height;
			state->hubp[i].rq_pte_row_height_l = pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear;
		}

		/* Chroma request size configuration */
		if (pipe_ctx->rq_regs.rq_regs_c.chunk_size > 0) {
			state->hubp[i].rq_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.chunk_size;
			state->hubp[i].rq_min_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_chunk_size;
			state->hubp[i].rq_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.meta_chunk_size;
			state->hubp[i].rq_min_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_meta_chunk_size;
			state->hubp[i].rq_dpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.dpte_group_size;
			state->hubp[i].rq_mpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.mpte_group_size;
			state->hubp[i].rq_swath_height_c = pipe_ctx->rq_regs.rq_regs_c.swath_height;
			state->hubp[i].rq_pte_row_height_c = pipe_ctx->rq_regs.rq_regs_c.pte_row_height_linear;
		}

		/* DML expansion modes */
		state->hubp[i].drq_expansion_mode = pipe_ctx->rq_regs.drq_expansion_mode;
		state->hubp[i].prq_expansion_mode = pipe_ctx->rq_regs.prq_expansion_mode;
		state->hubp[i].mrq_expansion_mode = pipe_ctx->rq_regs.mrq_expansion_mode;
		state->hubp[i].crq_expansion_mode = pipe_ctx->rq_regs.crq_expansion_mode;

		/* DML DLG parameters - nominal */
		state->hubp[i].dst_y_per_vm_vblank = pipe_ctx->dlg_regs.dst_y_per_vm_vblank;
		state->hubp[i].dst_y_per_row_vblank = pipe_ctx->dlg_regs.dst_y_per_row_vblank;
		state->hubp[i].dst_y_per_vm_flip = pipe_ctx->dlg_regs.dst_y_per_vm_flip;
		state->hubp[i].dst_y_per_row_flip = pipe_ctx->dlg_regs.dst_y_per_row_flip;

		/* DML prefetch settings */
		state->hubp[i].dst_y_prefetch = pipe_ctx->dlg_regs.dst_y_prefetch;
		state->hubp[i].vratio_prefetch = pipe_ctx->dlg_regs.vratio_prefetch;
		state->hubp[i].vratio_prefetch_c = pipe_ctx->dlg_regs.vratio_prefetch_c;

		/* TTU parameters */
		state->hubp[i].qos_level_low_wm = pipe_ctx->ttu_regs.qos_level_low_wm;
		state->hubp[i].qos_level_high_wm = pipe_ctx->ttu_regs.qos_level_high_wm;
		state->hubp[i].qos_level_flip = pipe_ctx->ttu_regs.qos_level_flip;
		state->hubp[i].min_ttu_vblank = pipe_ctx->ttu_regs.min_ttu_vblank;
	}

	/* Capture HUBBUB programming state */
	if (dc->res_pool->hubbub) {
		/* Individual DET buffer sizes - software state variables that program DET registers */
		for (i = 0; i < 4 && i < dc->res_pool->pipe_count; i++) {
			uint32_t det_size = res_ctx->pipe_ctx[i].det_buffer_size_kb;
			switch (i) {
			case 0:
				state->hubbub.det0_size = det_size;
				break;
			case 1:
				state->hubbub.det1_size = det_size;
				break;
			case 2:
				state->hubbub.det2_size = det_size;
				break;
			case 3:
				state->hubbub.det3_size = det_size;
				break;
			}
		}

		/* Compression buffer configuration - software state that programs COMPBUF_SIZE register */
		// TODO: Handle logic for legacy DCN pre-DCN401
		state->hubbub.compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
	}

	/* Capture DPP programming state for each pipe */
	for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		if (!pipe_ctx->stream)
			continue;

		state->dpp[i].dpp_clock_enable = (pipe_ctx->plane_res.dpp != NULL) ? 1 : 0;

		if (pipe_ctx->plane_state && pipe_ctx->plane_res.scl_data.recout.width > 0) {
			/* Access dscl_prog_data directly - this contains the actual software state used for register programming */
			struct dscl_prog_data *dscl_data = &pipe_ctx->plane_res.scl_data.dscl_prog_data;

			/* Recout (Rectangle of Interest) configuration - software state that programs RECOUT registers */
			state->dpp[i].recout_start_x = dscl_data->recout.x;
			state->dpp[i].recout_start_y = dscl_data->recout.y;
			state->dpp[i].recout_width = dscl_data->recout.width;
			state->dpp[i].recout_height = dscl_data->recout.height;

			/* MPC (Multiple Pipe/Plane Combiner) size - software state that programs MPC_SIZE registers */
			state->dpp[i].mpc_width = dscl_data->mpc_size.width;
			state->dpp[i].mpc_height = dscl_data->mpc_size.height;

			/* DSCL mode - software state that programs SCL_MODE registers */
			state->dpp[i].dscl_mode = dscl_data->dscl_mode;

			/* Scaler ratios - software state that programs scale ratio registers (use actual programmed ratios) */
			state->dpp[i].horz_ratio_int = dscl_data->ratios.h_scale_ratio >> 19; // Extract integer part from programmed ratio
			state->dpp[i].vert_ratio_int = dscl_data->ratios.v_scale_ratio >> 19; // Extract integer part from programmed ratio

			/* Basic scaler taps - software state that programs tap control registers (use actual programmed taps) */
			state->dpp[i].h_taps = dscl_data->taps.h_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
			state->dpp[i].v_taps = dscl_data->taps.v_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
		}
	}

	/* Capture essential clock state for underflow analysis */
	if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz > 0) {
		/* Core display clocks affecting bandwidth and timing */
		state->dccg.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;

		/* Per-pipe clock configuration - only capture what's essential */
		for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
			if (pipe_ctx->stream) {
				/* Essential clocks that directly affect underflow risk */
				state->dccg.dppclk_khz[i] = dc->clk_mgr->clks.dppclk_khz;
				state->dccg.pixclk_khz[i] = pipe_ctx->stream->timing.pix_clk_100hz / 10;
				state->dccg.dppclk_enable[i] = 1;

				/*
				 * DP stream clock only for DP signals.
				 * NOTE(review): this branch writes dpstreamclk_enable[i]
				 * for any i < pipe_count, but the inactive branch below
				 * only writes it for i < 4 — confirm the array size and
				 * make the bounds consistent.
				 */
				if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
					pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
					state->dccg.dpstreamclk_enable[i] = 1;
				} else {
					state->dccg.dpstreamclk_enable[i] = 0;
				}
			} else {
				/* Inactive pipe - no clocks */
				state->dccg.dppclk_khz[i] = 0;
				state->dccg.pixclk_khz[i] = 0;
				state->dccg.dppclk_enable[i] = 0;
				if (i < 4) {
					state->dccg.dpstreamclk_enable[i] = 0;
				}
			}
		}

		/* DSC clock state - only when actually using DSC */
		for (i = 0; i < MAX_PIPES; i++) {
			struct pipe_ctx *pipe_ctx = (i < dc->res_pool->pipe_count) ? &res_ctx->pipe_ctx[i] : NULL;
			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
				state->dccg.dscclk_khz[i] = 400000; /* Typical DSC clock frequency */
			} else {
				state->dccg.dscclk_khz[i] = 0;
			}
		}

		/* SYMCLK32 LE Control - only the essential HPO state for underflow analysis */
		for (i = 0; i < 2; i++) {
			state->dccg.symclk32_le_enable[i] = 0; /* Default: disabled */
		}

	}

	/* Capture essential DSC configuration for underflow analysis */
	for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		if (pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
			/* DSC is enabled - capture essential configuration */
			state->dsc[i].dsc_clock_enable = 1;

			/* DSC configuration affecting bandwidth and timing */
			struct dc_dsc_config *dsc_cfg = &pipe_ctx->stream->timing.dsc_cfg;
			state->dsc[i].dsc_num_slices_h = dsc_cfg->num_slices_h;
			state->dsc[i].dsc_num_slices_v = dsc_cfg->num_slices_v;
			state->dsc[i].dsc_bits_per_pixel = dsc_cfg->bits_per_pixel;

			/* OPP pipe source for DSC forwarding */
			if (pipe_ctx->stream_res.opp) {
				state->dsc[i].dscrm_dsc_forward_enable = 1;
				state->dsc[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
			} else {
				state->dsc[i].dscrm_dsc_forward_enable = 0;
				state->dsc[i].dscrm_dsc_opp_pipe_source = 0;
			}
		} else {
			/* DSC not enabled - clear all fields */
			memset(&state->dsc[i], 0, sizeof(state->dsc[i]));
		}
	}

	/* Capture MPC programming state - comprehensive register field coverage */
	for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		if (pipe_ctx->plane_state && pipe_ctx->stream) {
			struct dc_plane_state *plane_state = pipe_ctx->plane_state;

			/* MPCC blending tree and mode control - capture actual blend configuration */
			state->mpc.mpcc_mode[i] = (plane_state->cm.blend_func.type != TF_TYPE_BYPASS) ? 1 : 0;
			state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0;
			state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0;
			state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */
			state->mpc.mpcc_global_alpha[i] = plane_state->global_alpha_value;
			state->mpc.mpcc_global_gain[i] = plane_state->global_alpha ? 255 : 0;
			state->mpc.mpcc_bg_bpc[i] = 8; /* Standard 8-bit background */
			state->mpc.mpcc_bot_gain_mode[i] = 0; /* Standard gain mode */

			/* MPCC blending tree connections - capture tree topology */
			if (pipe_ctx->bottom_pipe) {
				state->mpc.mpcc_bot_sel[i] = pipe_ctx->bottom_pipe->pipe_idx;
			} else {
				state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
			}
			state->mpc.mpcc_top_sel[i] = pipe_ctx->pipe_idx; /* This pipe's DPP ID */

			/* MPCC output gamma control - capture gamma programming */
			if (plane_state->gamma_correction.type != GAMMA_CS_TFM_1D && plane_state->gamma_correction.num_entries > 0) {
				state->mpc.mpcc_ogam_mode[i] = 1; /* Gamma enabled */
				state->mpc.mpcc_ogam_select[i] = 0; /* Bank A selection */
				state->mpc.mpcc_ogam_pwl_disable[i] = 0; /* PWL enabled */
			} else {
				state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass mode */
				state->mpc.mpcc_ogam_select[i] = 0;
				state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
			}

			/* MPCC pipe assignment and operational status */
			if (pipe_ctx->stream_res.opp) {
				state->mpc.mpcc_opp_id[i] = pipe_ctx->stream_res.opp->inst;
			} else {
				state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
			}

			/* MPCC status indicators - active pipe state (assumed, not read from HW) */
			state->mpc.mpcc_idle[i] = 0; /* Active pipe - not idle */
			state->mpc.mpcc_busy[i] = 1; /* Active pipe - busy processing */

		} else {
			/* Pipe not active - set disabled/idle state for all fields */
			state->mpc.mpcc_mode[i] = 0;
			state->mpc.mpcc_alpha_blend_mode[i] = 0;
			state->mpc.mpcc_alpha_multiplied_mode[i] = 0;
			state->mpc.mpcc_blnd_active_overlap_only[i] = 0;
			state->mpc.mpcc_global_alpha[i] = 0;
			state->mpc.mpcc_global_gain[i] = 0;
			state->mpc.mpcc_bg_bpc[i] = 0;
			state->mpc.mpcc_bot_gain_mode[i] = 0;
			state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
			state->mpc.mpcc_top_sel[i] = 0xF; /* No top connection */
			state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass */
			state->mpc.mpcc_ogam_select[i] = 0;
			state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
			state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
			state->mpc.mpcc_idle[i] = 1; /* Idle */
			state->mpc.mpcc_busy[i] = 0; /* Not busy */
		}
	}

	/* Capture OPP programming state for each pipe - comprehensive register field coverage */
	for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		if (!pipe_ctx->stream)
			continue;

		if (pipe_ctx->stream_res.opp) {
			struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;

			/* OPP Pipe Control */
			state->opp[i].opp_pipe_clock_enable = 1; /* Active pipe has clock enabled */

			/* Display Pattern Generator (DPG) Control - 19 fields */
			if (pipe_ctx->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
				state->opp[i].dpg_enable = 1;
			} else {
				/* Video mode - DPG disabled */
				state->opp[i].dpg_enable = 0;
			}

			/* Format Control (FMT) - 18 fields */
			state->opp[i].fmt_pixel_encoding = timing->pixel_encoding;

			/* Chroma subsampling mode based on pixel encoding */
			if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
				state->opp[i].fmt_subsampling_mode = 1; /* 4:2:0 subsampling */
			} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
				state->opp[i].fmt_subsampling_mode = 2; /* 4:2:2 subsampling */
			} else {
				state->opp[i].fmt_subsampling_mode = 0; /* No subsampling (4:4:4) */
			}

			state->opp[i].fmt_cbcr_bit_reduction_bypass = (timing->pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
			state->opp[i].fmt_stereosync_override = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;

			/* Dithering control based on bit depth */
			if (timing->display_color_depth < COLOR_DEPTH_121212) {
				state->opp[i].fmt_spatial_dither_frame_counter_max = 15; /* Typical frame counter max */
				state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; /* No bit swapping */
				state->opp[i].fmt_spatial_dither_enable = 1;
				state->opp[i].fmt_spatial_dither_mode = 0; /* Spatial dithering mode */
				state->opp[i].fmt_spatial_dither_depth = timing->display_color_depth;
				state->opp[i].fmt_temporal_dither_enable = 0; /* Spatial dithering preferred */
			} else {
				state->opp[i].fmt_spatial_dither_frame_counter_max = 0;
				state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0;
				state->opp[i].fmt_spatial_dither_enable = 0;
				state->opp[i].fmt_spatial_dither_mode = 0;
				state->opp[i].fmt_spatial_dither_depth = 0;
				state->opp[i].fmt_temporal_dither_enable = 0;
			}

			/* Truncation control for bit depth reduction */
			if (timing->display_color_depth < COLOR_DEPTH_121212) {
				state->opp[i].fmt_truncate_enable = 1;
				state->opp[i].fmt_truncate_depth = timing->display_color_depth;
				state->opp[i].fmt_truncate_mode = 0; /* Round mode */
			} else {
				state->opp[i].fmt_truncate_enable = 0;
				state->opp[i].fmt_truncate_depth = 0;
				state->opp[i].fmt_truncate_mode = 0;
			}

			/* Data clamping control */
			state->opp[i].fmt_clamp_data_enable = 1; /* Clamping typically enabled */
			state->opp[i].fmt_clamp_color_format = timing->pixel_encoding;

			/* Dynamic expansion for limited range content */
			if (timing->pixel_encoding != PIXEL_ENCODING_RGB) {
				state->opp[i].fmt_dynamic_exp_enable = 1; /* YCbCr typically needs expansion */
				state->opp[i].fmt_dynamic_exp_mode = 0; /* Standard expansion */
			} else {
				state->opp[i].fmt_dynamic_exp_enable = 0; /* RGB typically full range */
				state->opp[i].fmt_dynamic_exp_mode = 0;
			}

			/* Legacy field for compatibility */
			state->opp[i].fmt_bit_depth_control = timing->display_color_depth;

			/* Output Buffer (OPPBUF) Control - 6 fields */
			state->opp[i].oppbuf_active_width = timing->h_addressable;
			state->opp[i].oppbuf_pixel_repetition = 0; /* No pixel repetition by default */

			/* Multi-Stream Output (MSO) / ODM segmentation */
			if (pipe_ctx->next_odm_pipe) {
				state->opp[i].oppbuf_display_segmentation = 1; /* Segmented display */
				state->opp[i].oppbuf_overlap_pixel_num = 0; /* ODM overlap pixels */
			} else {
				state->opp[i].oppbuf_display_segmentation = 0; /* Single segment */
				state->opp[i].oppbuf_overlap_pixel_num = 0;
			}

			/* 3D/Stereo control */
			if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) {
				state->opp[i].oppbuf_3d_vact_space1_size = 30; /* Typical stereo blanking */
				state->opp[i].oppbuf_3d_vact_space2_size = 30;
			} else {
				state->opp[i].oppbuf_3d_vact_space1_size = 0;
				state->opp[i].oppbuf_3d_vact_space2_size = 0;
			}

			/* DSC Forward Config - 3 fields */
			if (timing->dsc_cfg.num_slices_h > 0) {
				state->opp[i].dscrm_dsc_forward_enable = 1;
				state->opp[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
				state->opp[i].dscrm_dsc_forward_enable_status = 1; /* Status follows enable */
			} else {
				state->opp[i].dscrm_dsc_forward_enable = 0;
				state->opp[i].dscrm_dsc_opp_pipe_source = 0;
				state->opp[i].dscrm_dsc_forward_enable_status = 0;
			}
		} else {
			/* No OPP resource - set all fields to disabled state */
			memset(&state->opp[i], 0, sizeof(state->opp[i]));
		}
	}

	/* Capture OPTC programming state for each pipe - comprehensive register field coverage */
	for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		if (!pipe_ctx->stream)
			continue;

		if (pipe_ctx->stream_res.tg) {
			struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;

			state->optc[i].otg_master_inst = pipe_ctx->stream_res.tg->inst;

			/* OTG_CONTROL register - 5 fields */
			state->optc[i].otg_master_enable = 1; /* Active stream */
			state->optc[i].otg_disable_point_cntl = 0; /* Normal operation */
			state->optc[i].otg_start_point_cntl = 0; /* Normal start */
			state->optc[i].otg_field_number_cntl = (timing->flags.INTERLACE) ? 1 : 0;
			state->optc[i].otg_out_mux = 0; /* Direct output */

			/* OTG Horizontal Timing - 7 fields */
			state->optc[i].otg_h_total = timing->h_total;
			state->optc[i].otg_h_blank_start = timing->h_addressable;
			state->optc[i].otg_h_blank_end = timing->h_total - timing->h_front_porch;
			state->optc[i].otg_h_sync_start = timing->h_addressable + timing->h_front_porch;
			state->optc[i].otg_h_sync_end = timing->h_addressable + timing->h_front_porch + timing->h_sync_width;
			state->optc[i].otg_h_sync_polarity = timing->flags.HSYNC_POSITIVE_POLARITY ? 0 : 1;
			state->optc[i].otg_h_timing_div_mode = (pipe_ctx->next_odm_pipe) ? 1 : 0; /* ODM divide mode */

			/* OTG Vertical Timing - 7 fields */
			state->optc[i].otg_v_total = timing->v_total;
			state->optc[i].otg_v_blank_start = timing->v_addressable;
			state->optc[i].otg_v_blank_end = timing->v_total - timing->v_front_porch;
			state->optc[i].otg_v_sync_start = timing->v_addressable + timing->v_front_porch;
			state->optc[i].otg_v_sync_end = timing->v_addressable + timing->v_front_porch + timing->v_sync_width;
			state->optc[i].otg_v_sync_polarity = timing->flags.VSYNC_POSITIVE_POLARITY ? 0 : 1;
			state->optc[i].otg_v_sync_mode = 0; /* Normal sync mode */

			/* Initialize remaining core fields with appropriate defaults */
			// TODO: Update logic for accurate vtotal min/max
			state->optc[i].otg_v_total_max = timing->v_total + 100; /* Typical DRR range */
			state->optc[i].otg_v_total_min = timing->v_total - 50;
			state->optc[i].otg_v_total_mid = timing->v_total;

			/* ODM configuration */
			// TODO: Update logic to have complete ODM mappings (e.g. 3:1 and 4:1) stored in single pipe
			if (pipe_ctx->next_odm_pipe) {
				state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
				state->optc[i].optc_seg1_src_sel = pipe_ctx->next_odm_pipe->stream_res.opp ? pipe_ctx->next_odm_pipe->stream_res.opp->inst : 0;
				state->optc[i].optc_num_of_input_segment = 1; /* 2 segments - 1 */
			} else {
				state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
				state->optc[i].optc_seg1_src_sel = 0;
				state->optc[i].optc_num_of_input_segment = 0; /* Single segment */
			}

			/* DSC configuration */
			if (timing->dsc_cfg.num_slices_h > 0) {
				state->optc[i].optc_dsc_mode = 1; /* DSC enabled */
				state->optc[i].optc_dsc_bytes_per_pixel = timing->dsc_cfg.bits_per_pixel / 16; /* Convert to bytes */
				state->optc[i].optc_dsc_slice_width = timing->h_addressable / timing->dsc_cfg.num_slices_h;
			} else {
				state->optc[i].optc_dsc_mode = 0;
				state->optc[i].optc_dsc_bytes_per_pixel = 0;
				state->optc[i].optc_dsc_slice_width = 0;
			}

			/* Essential control fields */
			state->optc[i].otg_stereo_enable = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
			state->optc[i].otg_interlace_enable = timing->flags.INTERLACE ? 1 : 0;
			state->optc[i].otg_clock_enable = 1; /* OTG clock enabled */
			state->optc[i].vtg0_enable = 1; /* VTG enabled for timing generation */

			/* Initialize other key fields to defaults */
			state->optc[i].optc_input_pix_clk_en = 1;
			state->optc[i].optc_segment_width = (pipe_ctx->next_odm_pipe) ? (timing->h_addressable / 2) : timing->h_addressable;
			state->optc[i].otg_vready_offset = 1;
			state->optc[i].otg_vstartup_start = timing->v_addressable + 10;
			state->optc[i].otg_vupdate_offset = 0;
			state->optc[i].otg_vupdate_width = 5;
		} else {
			/* No timing generator resource - initialize all fields to 0 */
			memset(&state->optc[i], 0, sizeof(state->optc[i]));
		}
	}

	state->state_valid = true;
	return true;
}

/* Forward the pre-OS DMCUB info dump to the DMUB service. */
void dc_log_preos_dmcub_info(const struct dc *dc)
{
	dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv);
}

/**
 * dc_get_qos_info() - fill @info with measured memory QoS values
 *
 * @dc: DC instance
 * @info: output; zeroed first, populated from hwss.measure_memory_qos
 *
 * Return: false if no measure_memory_qos hook is available, true otherwise.
 */
bool dc_get_qos_info(struct dc *dc, struct dc_qos_info *info)
{
	const struct dc_clocks *clk = &dc->current_state->bw_ctx.bw.dcn.clk;
	struct memory_qos qos;

	memset(info, 0, sizeof(*info));

	// Check if measurement function is available
	if (!dc->hwss.measure_memory_qos) {
		return false;
	}

	// Call unified measurement function
	dc->hwss.measure_memory_qos(dc, &qos);

	// Populate info from measured qos
	info->actual_peak_bw_in_mbps = qos.peak_bw_mbps;
	info->actual_avg_bw_in_mbps = qos.avg_bw_mbps;
	info->actual_min_latency_in_ns = qos.min_latency_ns;
	info->actual_max_latency_in_ns = qos.max_latency_ns;
	info->actual_avg_latency_in_ns = qos.avg_latency_ns;
	/* Upper bound on DCN bandwidth derived from FCLK (MHz * 64). */
	info->dcn_bandwidth_ub_in_mbps = (uint32_t)(clk->fclk_khz / 1000 * 64);

	return true;
}

/*
 * Which commit path the v3 update state machine selected in the prepare
 * stage; consumed by the execute/cleanup stages.
 */
enum update_v3_flow {
	UPDATE_V3_FLOW_INVALID,
	UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST,
	UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL,
	UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS,
	UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW,
	UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT,
};

/* Carries one plane/stream update through the prepare/execute/cleanup stages. */
struct dc_update_scratch_space {
	struct dc *dc;
	struct dc_surface_update *surface_updates;
	int surface_count;
	struct dc_stream_state *stream;
	struct dc_stream_update *stream_update;
	bool
	update_v3;
	bool do_clear_update_flags;
	enum surface_update_type update_type;
	struct dc_state *new_context;
	enum update_v3_flow flow;          /* path chosen by the v3 prepare stage */
	struct dc_state *backup_context;
	struct dc_state *intermediate_context;
	struct pipe_split_policy_backup intermediate_policy;
	struct dc_surface_update intermediate_updates[MAX_SURFACES];
	int intermediate_count;
};

/* Size of the opaque scratch area callers must allocate for an update. */
size_t dc_update_scratch_space_size(void)
{
	return sizeof(struct dc_update_scratch_space);
}

/* v2 "prepare" stage: runs the entire legacy v2 update in one shot. */
static bool update_planes_and_stream_prepare_v2(
	struct dc_update_scratch_space *scratch
)
{
	// v2 is too tangled to break into stages, so just execute everything under lock
	dc_exit_ips_for_hw_access(scratch->dc);
	return update_planes_and_stream_v2(
		scratch->dc,
		scratch->surface_updates,
		scratch->surface_count,
		scratch->stream,
		scratch->stream_update
	);
}

/* v2 "execute" stage: intentionally empty. */
static void update_planes_and_stream_execute_v2(
	const struct dc_update_scratch_space *scratch
)
{
	// Nothing to do, see `update_planes_and_stream_prepare_v2`
	(void) scratch;
}

/* v2 "cleanup" stage: optionally clear update flags; always reports false. */
static bool update_planes_and_stream_cleanup_v2(
	const struct dc_update_scratch_space *scratch
)
{
	if (scratch->do_clear_update_flags)
		clear_update_flags(scratch->surface_updates, scratch->surface_count, scratch->stream);

	return false;
}

static void update_planes_and_stream_cleanup_v3_release_minimal(
	struct dc_update_scratch_space *scratch,
	bool backup
);

/*
 * Check whether current -> intermediate -> new is a seamless pipe topology
 * transition sequence for the contexts held in scratch.
 */
static bool update_planes_and_stream_prepare_v3_intermediate_seamless(
	struct dc_update_scratch_space *scratch
)
{
	return is_pipe_topology_transition_seamless_with_intermediate_step(
		scratch->dc,
		scratch->dc->current_state,
		scratch->intermediate_context,
		scratch->new_context
	);
}

static void transition_countdown_init(struct dc
		*dc)
{
	/* Reset the fast-flip countdown used to detect steady state (debug override wins). */
	dc->check_config.transition_countdown_to_steady_state =
		dc->debug.num_fast_flips_to_steady_state_override ?
		dc->debug.num_fast_flips_to_steady_state_override :
		NUM_FAST_FLIPS_TO_STEADY_STATE;
}

/*
 * v3 "prepare" stage: update surface/stream state and pick the commit flow
 * (scratch->flow) the execute stage will follow:
 *  - no new context: fast vs full HWSS path;
 *  - new context that transitions seamlessly: commit directly;
 *  - otherwise: try a minimal transition state derived from the new context,
 *    then from a backup of the current context, before giving up.
 * Returns false (and restores the saved stream/plane state) on failure.
 */
static bool update_planes_and_stream_prepare_v3(
	struct dc_update_scratch_space *scratch
)
{
	if (scratch->flow == UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS) {
		return true;
	}
	ASSERT(scratch->flow == UPDATE_V3_FLOW_INVALID);
	dc_exit_ips_for_hw_access(scratch->dc);

	/* HWSS path determination needs to be done prior to updating the surface and stream states. */
	struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };

	populate_fast_updates(fast_update,
		scratch->surface_updates,
		scratch->surface_count,
		scratch->stream_update);

	const bool is_hwss_fast_path_only =
		fast_update_only(scratch->dc,
			fast_update,
			scratch->surface_updates,
			scratch->surface_count,
			scratch->stream_update,
			scratch->stream) &&
		!scratch->dc->check_config.enable_legacy_fast_update;

	if (!update_planes_and_stream_state(
		scratch->dc,
		scratch->surface_updates,
		scratch->surface_count,
		scratch->stream,
		scratch->stream_update,
		&scratch->update_type,
		&scratch->new_context
	)) {
		return false;
	}

	/* In-place update: current context was reused, no topology change possible. */
	if (scratch->new_context == scratch->dc->current_state) {
		ASSERT(scratch->update_type < UPDATE_TYPE_FULL);

		scratch->flow = is_hwss_fast_path_only
			? UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST
			: UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL;
		return true;
	}

	ASSERT(scratch->update_type >= UPDATE_TYPE_FULL);

	const bool seamless = scratch->dc->hwss.is_pipe_topology_transition_seamless(
		scratch->dc,
		scratch->dc->current_state,
		scratch->new_context
	);
	if (seamless) {
		scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS;
		if (scratch->dc->check_config.deferred_transition_state)
			/* reset countdown as steady state not reached */
			transition_countdown_init(scratch->dc);
		return true;
	}

	if (!scratch->dc->debug.disable_deferred_minimal_transitions) {
		scratch->dc->check_config.deferred_transition_state = true;
		transition_countdown_init(scratch->dc);
	}

	/* First attempt: minimal transition state derived from the NEW context. */
	scratch->intermediate_context = create_minimal_transition_state(
		scratch->dc,
		scratch->new_context,
		&scratch->intermediate_policy
	);
	if (scratch->intermediate_context) {
		if (update_planes_and_stream_prepare_v3_intermediate_seamless(scratch)) {
			scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW;
			return true;
		}

		update_planes_and_stream_cleanup_v3_release_minimal(scratch, false);
	}

	/*
	 * Second attempt: back up the current context, restore the pre-update
	 * plane/stream state, and derive a minimal transition from the backup.
	 */
	scratch->backup_context = scratch->dc->current_state;
	restore_planes_and_stream_state(&scratch->dc->scratch.current_state, scratch->stream);
	dc_state_retain(scratch->backup_context);
	scratch->intermediate_context = create_minimal_transition_state(
		scratch->dc,
		scratch->backup_context,
		&scratch->intermediate_policy
	);
	if (scratch->intermediate_context) {
		if (update_planes_and_stream_prepare_v3_intermediate_seamless(scratch)) {
			scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT;
			scratch->intermediate_count = initialize_empty_surface_updates(
				scratch->stream, scratch->intermediate_updates
			);
			return true;
		}

		update_planes_and_stream_cleanup_v3_release_minimal(scratch, true);
	}

	/* No viable transition: drop the backup reference and restore the new state. */
	scratch->flow = UPDATE_V3_FLOW_INVALID;
	dc_state_release(scratch->backup_context);
	restore_planes_and_stream_state(&scratch->dc->scratch.new_state, scratch->stream);
	return false;
}

/*
 * Common commit helper for the v3 execute stage. The flags select which
 * updates/count, which context, and whether the stream update is passed on
 * to commit_planes_for_stream().
 */
static void update_planes_and_stream_execute_v3_commit(
	const struct dc_update_scratch_space *scratch,
	bool intermediate_update,
	bool intermediate_context,
	bool use_stream_update
)
{
	commit_planes_for_stream(
		scratch->dc,
		intermediate_update ? scratch->intermediate_updates : scratch->surface_updates,
		intermediate_update ? scratch->intermediate_count : scratch->surface_count,
		scratch->stream,
		use_stream_update ? scratch->stream_update : NULL,
		intermediate_context ? UPDATE_TYPE_FULL : scratch->update_type,
		// `dc->current_state` only used in `NO_NEW_CONTEXT`, where it is equal to `new_context`
		intermediate_context ? scratch->intermediate_context : scratch->new_context
	);
}

/* v3 "execute" stage: dispatch on the flow chosen by the prepare stage. */
static void update_planes_and_stream_execute_v3(
	const struct dc_update_scratch_space *scratch
)
{
	switch (scratch->flow) {
	case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST:
		commit_planes_for_stream_fast(
			scratch->dc,
			scratch->surface_updates,
			scratch->surface_count,
			scratch->stream,
			scratch->stream_update,
			scratch->update_type,
			scratch->new_context
		);
		break;

	case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL:
	case UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS:
		update_planes_and_stream_execute_v3_commit(scratch, false, false, true);
		break;

	case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW:
		update_planes_and_stream_execute_v3_commit(scratch, false, true,
			scratch->dc->check_config.deferred_transition_state);
		break;

	case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT:
update_planes_and_stream_execute_v3_commit(scratch, true, true, false); 7509 break; 7510 7511 case UPDATE_V3_FLOW_INVALID: 7512 default: 7513 ASSERT(false); 7514 } 7515 } 7516 7517 static void update_planes_and_stream_cleanup_v3_release_minimal( 7518 struct dc_update_scratch_space *scratch, 7519 bool backup 7520 ) 7521 { 7522 release_minimal_transition_state( 7523 scratch->dc, 7524 scratch->intermediate_context, 7525 backup ? scratch->backup_context : scratch->new_context, 7526 &scratch->intermediate_policy 7527 ); 7528 } 7529 7530 static void update_planes_and_stream_cleanup_v3_intermediate( 7531 struct dc_update_scratch_space *scratch, 7532 bool backup 7533 ) 7534 { 7535 swap_and_release_current_context(scratch->dc, scratch->intermediate_context, scratch->stream); 7536 dc_state_retain(scratch->dc->current_state); 7537 update_planes_and_stream_cleanup_v3_release_minimal(scratch, backup); 7538 } 7539 7540 static bool update_planes_and_stream_cleanup_v3( 7541 struct dc_update_scratch_space *scratch 7542 ) 7543 { 7544 switch (scratch->flow) { 7545 case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST: 7546 case UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL: 7547 if (scratch->dc->check_config.transition_countdown_to_steady_state) 7548 scratch->dc->check_config.transition_countdown_to_steady_state--; 7549 break; 7550 7551 case UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS: 7552 swap_and_release_current_context(scratch->dc, scratch->new_context, scratch->stream); 7553 break; 7554 7555 case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_NEW: 7556 update_planes_and_stream_cleanup_v3_intermediate(scratch, false); 7557 if (scratch->dc->check_config.deferred_transition_state) { 7558 dc_state_release(scratch->new_context); 7559 } else { 7560 scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS; 7561 return true; 7562 } 7563 break; 7564 7565 case UPDATE_V3_FLOW_NEW_CONTEXT_MINIMAL_CURRENT: 7566 update_planes_and_stream_cleanup_v3_intermediate(scratch, true); 7567 dc_state_release(scratch->backup_context); 
7568 restore_planes_and_stream_state(&scratch->dc->scratch.new_state, scratch->stream); 7569 scratch->flow = UPDATE_V3_FLOW_NEW_CONTEXT_SEAMLESS; 7570 return true; 7571 7572 case UPDATE_V3_FLOW_INVALID: 7573 default: 7574 ASSERT(false); 7575 } 7576 7577 if (scratch->do_clear_update_flags) 7578 clear_update_flags(scratch->surface_updates, scratch->surface_count, scratch->stream); 7579 7580 return false; 7581 } 7582 7583 struct dc_update_scratch_space *dc_update_planes_and_stream_init( 7584 struct dc *dc, 7585 struct dc_surface_update *surface_updates, 7586 int surface_count, 7587 struct dc_stream_state *stream, 7588 struct dc_stream_update *stream_update 7589 ) 7590 { 7591 const enum dce_version version = dc->ctx->dce_version; 7592 struct dc_update_scratch_space *scratch = stream->update_scratch; 7593 7594 *scratch = (struct dc_update_scratch_space){ 7595 .dc = dc, 7596 .surface_updates = surface_updates, 7597 .surface_count = surface_count, 7598 .stream = stream, 7599 .stream_update = stream_update, 7600 .update_v3 = version >= DCN_VERSION_4_01 || version == DCN_VERSION_3_2 || version == DCN_VERSION_3_21, 7601 .do_clear_update_flags = version >= DCN_VERSION_1_0, 7602 }; 7603 7604 return scratch; 7605 } 7606 7607 bool dc_update_planes_and_stream_prepare( 7608 struct dc_update_scratch_space *scratch 7609 ) 7610 { 7611 return scratch->update_v3 7612 ? update_planes_and_stream_prepare_v3(scratch) 7613 : update_planes_and_stream_prepare_v2(scratch); 7614 } 7615 7616 void dc_update_planes_and_stream_execute( 7617 const struct dc_update_scratch_space *scratch 7618 ) 7619 { 7620 scratch->update_v3 7621 ? update_planes_and_stream_execute_v3(scratch) 7622 : update_planes_and_stream_execute_v2(scratch); 7623 } 7624 7625 bool dc_update_planes_and_stream_cleanup( 7626 struct dc_update_scratch_space *scratch 7627 ) 7628 { 7629 return scratch->update_v3 7630 ? update_planes_and_stream_cleanup_v3(scratch) 7631 : update_planes_and_stream_cleanup_v2(scratch); 7632 } 7633 7634