/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "amdgpu.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dml2/dml2_internal_types.h"
#endif

#include "dce/dmub_outbox.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */

/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			dc->link_srv->destroy_link(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	// condition loop on link_count to allow skipping invalid indices
	for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = dc->link_srv->create_link(&link_init_params);

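		/* create_link() may return NULL for a connector index it cannot
		 * construct a link for; such indices are skipped and
		 * link_count only advances on success (see the loop
		 * condition above).
		 */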
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = dc->link_srv->create_link(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
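	 * For example, a platform with more DIGs than physical connectors
	 * reaches here with some NULL entries in link_encoders[]; those
	 * slots are filled with minimally-constructed encoders below.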
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					res = false;
				}
			}
		}
	}

	return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
{
	if (!dc || !stream || !adjust)
		return false;

	if (!dc->current_state)
		return false;

	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			if (dc->hwss.set_long_vtotal)
				dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);

			return true;
		}
	}

	return false;
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 * %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	/*
	 * Don't adjust DRR while there are bandwidth optimizations pending to
	 * avoid conflicting with firmware updates.
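	 * (dce_version values above DCE_VERSION_MAX denote DCN ASICs, so the
	 * guard below applies this restriction only on DCN hardware.)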
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX)
		if (dc->optimized_required || dc->wm_optimized_required)
			return false;

	dc_exit_ips_for_hw_access(dc);

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;
	stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;

	if (dc->caps.max_v_total != 0 &&
		(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
		if (adjust->allow_otg_v_count_halt)
			return set_long_vtotal(dc, stream, adjust);
		else
			return false;
	}

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			return true;
		}
	}
	return false;
}

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 * timing_generator for the DC;
 * %false if the pipe context is not found or there is no
 * timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

	if (is_stop) {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
	}

	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	if (is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	else
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, bool is_stop)
{
	struct dmcu *dmcu;
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
	if (dmub_srv)
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
	else
		return false;

	return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 * once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 * %true if the stream has been configured.
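 *
 * Note: when @crc_window is NULL, window A and window B both default to the
 * full frame and are combined as %UNION_WINDOW_A_B in the body below.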
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
		struct crc_params *crc_window, bool enable, bool continuous)
{
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	pipe = resource_get_otg_master_for_stream(
			&dc->current_state->res_ctx, stream);

	/* Stream not found */
	if (pipe == NULL)
		return false;

	dc_exit_ips_for_hw_access(dc);

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
	param.odm_mode = pipe->next_odm_pipe ? 1:0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y: CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
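 * %true if the CRC was successfully read back from the stream's timing
 * generator.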
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	dc_exit_ips_for_hw_access(stream->ctx->dc);

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
			pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
				pipes->plane_res.xfm,
				pipes->plane_res.scl_data.lb_params.depth,
				&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_state_release(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->link_srv)
		link_destroy_link_service(&dc->link_srv);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	kfree(dc->ctx->logger);
	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;

}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Create logger */
	dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

	if (!dc_ctx->logger) {
		kfree(dc_ctx);
		return false;
	}

	dc_ctx->logger->dev = adev_to_drm(init_params->driver);
	dc->dml.logger = dc_ctx->logger;

	dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		kfree(dc_ctx);
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	dc->link_srv = link_create_link_service();
	if (!dc->link_srv)
		return false;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
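	// (freed in dc_destruct(); presumably used for VM/VMID bookkeeping on
	// DCN hardware that supports virtual-memory surface addressing)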
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (init_params->bb_from_dmub)
		dc->dml2_options.bb_from_dmub = init_params->bb_from_dmub;
	else
		dc->dml2_options.bb_from_dmub = NULL;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx__resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
	if (dc->caps.max_optimizable_video_width == 0)
		dc->caps.max_optimizable_video_width = 5120;
	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
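	 * (see create_link_encoders() above; this is only needed on platforms
	 * with USB4 DPIA endpoints)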
	 */
	if (!create_link_encoders(dc))
		goto fail;

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */
	dc->current_state = dc_state_create(dc, NULL);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	return true;

fail:
	return false;
}

static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
		bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

		if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
			get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
			get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
			get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else {
			if (dc->ctx->dce_version < DCN_VERSION_2_0)
				color_space_to_black_color(
					dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
		}
		if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
				get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
				get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
				get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
		}
	}
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_state_create_current_copy(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for a different use.
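			 * (The OTG is re-enabled here only long enough for the
			 * plane-disable programming to latch; it is shut back
			 * down via disable_phantom_crtc() further below.)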
			 */
			if (is_phantom) {
				if (tg->funcs->enable_crtc) {
					int main_pipe_width = 0, main_pipe_height = 0;
					struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);

					if (old_paired_stream) {
						main_pipe_width = old_paired_stream->dst.width;
						main_pipe_height = old_paired_stream->dst.height;
					}

					if (dc->hwss.blank_phantom)
						dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
					tg->funcs->enable_crtc(tg);
				}
			}

			if (is_phantom)
				dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
			else
				dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (pipe->stream && pipe->plane_state) {
				if (!dc->debug.using_dml2)
					set_p_state_switch_method(dc, context, pipe);
				dc_update_visual_confirm_color(dc, context, pipe);
			}

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}

			if (dc->res_pool->funcs->prepare_mcache_programming)
				dc->res_pool->funcs->prepare_mcache_programming(dc, dangling_context);
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			/* We need to put the phantom OTG back into its default (disabled) state or we
			 * can get corruption when transitioning from one SubVP config to a different one.
			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
			 * will still get its double buffer update.
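			 * (disable_phantom_crtc is optional in the timing
			 * generator's function table, hence the NULL check
			 * below.)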
			 */
			if (is_phantom) {
				if (tg->funcs->disable_phantom_crtc)
					tg->funcs->disable_phantom_crtc(tg);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_state_release(current_ctx);
}

static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* if the timing has changed, disable the stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		if (stream->apply_seamless_boot_optimization)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz = 0;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						dc->link_srv->set_dpms_off(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

/* Public functions */

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		dc->caps.linear_pitch_alignment = 64;
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

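/* Construction failed: tear down whatever was created and free the dc struct. */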
destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_connection_type(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{

	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
	dc->ctx->cp_psp = init_params->cp_psp;
}

void dc_deinit_callbacks(struct dc *dc)
{
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);

			if (!status)
				continue;

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;

		}

		/* remove any other unblanked pipes as they have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			/* remove any other pipes by checking valid plane */
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, ctx, group_index, group_size, pipe_set);
			} else
			if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}

static bool streams_changed(struct dc *dc,
		struct dc_stream_state *streams[],
		uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != streams[i])
			return true;
		if (!streams[i]->link->link_state_valid)
			return true;
	}

	return false;
}

bool dc_validate_boot_timing(const struct dc *dc,
		const struct dc_sink *sink,
		struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	if (dc->debug.force_odm_combine)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	if (tg_inst != link->link_enc->preferred_engine)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz = 0;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2) {
			pix_clk_100hz *= 2;
		} else if (numOdmPipes == 4) {
			pix_clk_100hz *= 4;
		} else if (se && se->funcs->get_pixels_per_cycle) {
			uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se);

			if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy)
				return false;

			pix_clk_100hz *= pixels_per_cycle;
		}

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
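		// (a mismatch below means the firmware-lit display is not
		// running at the requested pixel clock, so the boot timing is
		// rejected for seamless boot)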
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		return false;
	}

	if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
		return false;

	if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}

static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
	return (pipe_ctx->plane_state == plane_state);
}

void dc_enable_stereo(
		struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *streams[],
		uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
		} else {
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		}

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}

void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		dc_exit_ips_for_hw_access(dc);

		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}

/**
 * dc_commit_state_no_check - Apply context to the hardware
 *
 * @dc: DC object with the current status to be updated
 * @context: New state that will become the current status at the end of this function
 *
 * Applies the given context to the hardware and copies it into the current
 * context. It's up to the user to release the src context afterwards.
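 *
 * Note: while SubVP or FAMS2 is in use, the hardware programming below is
 * wrapped in the corresponding global locks (see subvp_pipe_control_lock()
 * and fams2_global_control_lock() in the body).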
1887 * 1888 * Return: an enum dc_status result code for the operation 1889 */ 1890 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) 1891 { 1892 struct dc_bios *dcb = dc->ctx->dc_bios; 1893 enum dc_status result = DC_ERROR_UNEXPECTED; 1894 struct pipe_ctx *pipe; 1895 int i, k, l; 1896 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; 1897 struct dc_state *old_state; 1898 bool subvp_prev_use = false; 1899 1900 dc_z10_restore(dc); 1901 dc_allow_idle_optimizations(dc, false); 1902 1903 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1904 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1905 1906 /* Check old context for SubVP */ 1907 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 1908 if (subvp_prev_use) 1909 break; 1910 } 1911 1912 for (i = 0; i < context->stream_count; i++) 1913 dc_streams[i] = context->streams[i]; 1914 1915 if (!dcb->funcs->is_accelerated_mode(dcb)) { 1916 disable_vbios_mode_if_required(dc, context); 1917 dc->hwss.enable_accelerated_mode(dc, context); 1918 } 1919 1920 if (context->stream_count > get_seamless_boot_stream_count(context) || 1921 context->stream_count == 0) 1922 dc->hwss.prepare_bandwidth(dc, context); 1923 1924 /* When SubVP is active, all HW programming must be done while 1925 * SubVP lock is acquired 1926 */ 1927 if (dc->hwss.subvp_pipe_control_lock) 1928 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); 1929 if (dc->hwss.fams2_global_control_lock) 1930 dc->hwss.fams2_global_control_lock(dc, context, true); 1931 1932 if (dc->hwss.update_dsc_pg) 1933 dc->hwss.update_dsc_pg(dc, context, false); 1934 1935 disable_dangling_plane(dc, context); 1936 /* re-program planes for existing stream, in case we need to 1937 * free up plane resource for later use 1938 */ 1939 if (dc->hwss.apply_ctx_for_surface) { 1940 for (i = 0; i < context->stream_count; i++) { 1941 if (context->streams[i]->mode_changed) 1942 continue; 1943 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1944 dc->hwss.apply_ctx_for_surface( 1945 dc, context->streams[i], 1946 context->stream_status[i].plane_count, 1947 context); /* use new pipe config in new context */ 1948 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1949 dc->hwss.post_unlock_program_front_end(dc, context); 1950 } 1951 } 1952 1953 /* Program hardware */ 1954 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1955 pipe = &context->res_ctx.pipe_ctx[i]; 1956 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); 1957 } 1958 1959 result = dc->hwss.apply_ctx_to_hw(dc, context); 1960 1961 if (result != DC_OK) { 1962 /* Application of dc_state to hardware stopped. 
*/ 1963 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; 1964 return result; 1965 } 1966 1967 dc_trigger_sync(dc, context); 1968 1969 /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ 1970 for (i = 0; i < context->stream_count; i++) { 1971 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed; 1972 1973 context->streams[i]->update_flags.raw = 0xFFFFFFFF; 1974 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; 1975 } 1976 1977 /* Program all planes within new context*/ 1978 if (dc->res_pool->funcs->prepare_mcache_programming) 1979 dc->res_pool->funcs->prepare_mcache_programming(dc, context); 1980 if (dc->hwss.program_front_end_for_ctx) { 1981 dc->hwss.interdependent_update_lock(dc, context, true); 1982 dc->hwss.program_front_end_for_ctx(dc, context); 1983 dc->hwss.interdependent_update_lock(dc, context, false); 1984 dc->hwss.post_unlock_program_front_end(dc, context); 1985 } 1986 1987 if (dc->hwss.commit_subvp_config) 1988 dc->hwss.commit_subvp_config(dc, context); 1989 if (dc->hwss.subvp_pipe_control_lock) 1990 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); 1991 if (dc->hwss.fams2_global_control_lock) 1992 dc->hwss.fams2_global_control_lock(dc, context, false); 1993 1994 for (i = 0; i < context->stream_count; i++) { 1995 const struct dc_link *link = context->streams[i]->link; 1996 1997 if (!context->streams[i]->mode_changed) 1998 continue; 1999 2000 if (dc->hwss.apply_ctx_for_surface) { 2001 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 2002 dc->hwss.apply_ctx_for_surface( 2003 dc, context->streams[i], 2004 context->stream_status[i].plane_count, 2005 context); 2006 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 2007 dc->hwss.post_unlock_program_front_end(dc, context); 2008 } 2009 2010 /* 2011 * enable stereo 2012 * TODO rework dc_enable_stereo call to work with validation sets? 2013 */ 2014 for (k = 0; k < MAX_PIPES; k++) { 2015 pipe = &context->res_ctx.pipe_ctx[k]; 2016 2017 for (l = 0 ; pipe && l < context->stream_count; l++) { 2018 if (context->streams[l] && 2019 context->streams[l] == pipe->stream && 2020 dc->hwss.setup_stereo) 2021 dc->hwss.setup_stereo(pipe, dc); 2022 } 2023 } 2024 2025 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", 2026 context->streams[i]->timing.h_addressable, 2027 context->streams[i]->timing.v_addressable, 2028 context->streams[i]->timing.h_total, 2029 context->streams[i]->timing.v_total, 2030 context->streams[i]->timing.pix_clk_100hz / 10); 2031 } 2032 2033 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 2034 2035 if (context->stream_count > get_seamless_boot_stream_count(context) || 2036 context->stream_count == 0) { 2037 /* Must wait for no flips to be pending before doing optimize bw */ 2038 hwss_wait_for_no_pipes_pending(dc, context); 2039 /* 2040 * optimized dispclk depends on ODM setup. Need to wait for ODM 2041 * update pending complete before optimizing bandwidth. 
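
/* Editorial note: on success the committed context becomes the new
 * dc->current_state; the old state is released and an extra reference is
 * retained on the new one, which is why the caller is still expected to
 * release its own reference to the source context afterwards.
 */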
2042 */ 2043 hwss_wait_for_odm_update_pending_complete(dc, context); 2044 /* pplib is notified if disp_num changed */ 2045 dc->hwss.optimize_bandwidth(dc, context); 2046 /* Need to do otg sync again as otg could be out of sync due to otg 2047 * workaround applied during clock update 2048 */ 2049 dc_trigger_sync(dc, context); 2050 } 2051 2052 if (dc->hwss.update_dsc_pg) 2053 dc->hwss.update_dsc_pg(dc, context, true); 2054 2055 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 2056 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2057 else 2058 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2059 2060 context->stream_mask = get_stream_mask(dc, context); 2061 2062 if (context->stream_mask != dc->current_state->stream_mask) 2063 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); 2064 2065 for (i = 0; i < context->stream_count; i++) 2066 context->streams[i]->mode_changed = false; 2067 2068 /* Clear update flags that were set earlier to avoid redundant programming */ 2069 for (i = 0; i < context->stream_count; i++) { 2070 context->streams[i]->update_flags.raw = 0x0; 2071 } 2072 2073 old_state = dc->current_state; 2074 dc->current_state = context; 2075 2076 dc_state_release(old_state); 2077 2078 dc_state_retain(dc->current_state); 2079 2080 return result; 2081 } 2082 2083 static bool commit_minimal_transition_state(struct dc *dc, 2084 struct dc_state *transition_base_context); 2085 2086 /** 2087 * dc_commit_streams - Commit current stream state 2088 * 2089 * @dc: DC object with the commit state to be configured in the hardware 2090 * @params: Parameters for the commit, including the streams to be committed 2091 * 2092 * Function responsible for commit streams change to the hardware. 2093 * 2094 * Return: 2095 * Return DC_OK if everything work as expected, otherwise, return a dc_status 2096 * code. 2097 */ 2098 enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params) 2099 { 2100 int i, j; 2101 struct dc_state *context; 2102 enum dc_status res = DC_OK; 2103 struct dc_validation_set set[MAX_STREAMS] = {0}; 2104 struct pipe_ctx *pipe; 2105 bool handle_exit_odm2to1 = false; 2106 2107 if (!params) 2108 return DC_ERROR_UNEXPECTED; 2109 2110 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) 2111 return res; 2112 2113 if (!streams_changed(dc, params->streams, params->stream_count) && 2114 dc->current_state->power_source == params->power_source) 2115 return res; 2116 2117 dc_exit_ips_for_hw_access(dc); 2118 2119 DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count); 2120 2121 for (i = 0; i < params->stream_count; i++) { 2122 struct dc_stream_state *stream = params->streams[i]; 2123 struct dc_stream_status *status = dc_stream_get_status(stream); 2124 2125 dc_stream_log(dc, stream); 2126 2127 set[i].stream = stream; 2128 2129 if (status) { 2130 set[i].plane_count = status->plane_count; 2131 for (j = 0; j < status->plane_count; j++) 2132 set[i].plane_states[j] = status->plane_states[j]; 2133 } 2134 } 2135 2136 /* ODM Combine 2:1 power optimization is only applied for single stream 2137 * scenario, it uses extra pipes than needed to reduce power consumption 2138 * We need to switch off this feature to make room for new streams. 
2139 */ 2140 if (params->stream_count > dc->current_state->stream_count && 2141 dc->current_state->stream_count == 1) { 2142 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2143 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2144 if (pipe->next_odm_pipe) 2145 handle_exit_odm2to1 = true; 2146 } 2147 } 2148 2149 if (handle_exit_odm2to1) 2150 res = commit_minimal_transition_state(dc, dc->current_state); 2151 2152 context = dc_state_create_current_copy(dc); 2153 if (!context) 2154 goto context_alloc_fail; 2155 2156 context->power_source = params->power_source; 2157 2158 res = dc_validate_with_context(dc, set, params->stream_count, context, false); 2159 if (res != DC_OK) { 2160 BREAK_TO_DEBUGGER(); 2161 goto fail; 2162 } 2163 2164 res = dc_commit_state_no_check(dc, context); 2165 2166 for (i = 0; i < params->stream_count; i++) { 2167 for (j = 0; j < context->stream_count; j++) { 2168 if (params->streams[i]->stream_id == context->streams[j]->stream_id) 2169 params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 2170 2171 if (dc_is_embedded_signal(params->streams[i]->signal)) { 2172 struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]); 2173 2174 if (!status) 2175 continue; 2176 2177 if (dc->hwss.is_abm_supported) 2178 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]); 2179 else 2180 status->is_abm_supported = true; 2181 } 2182 } 2183 } 2184 2185 fail: 2186 dc_state_release(context); 2187 2188 context_alloc_fail: 2189 2190 DC_LOG_DC("%s Finished.\n", __func__); 2191 2192 return res; 2193 } 2194 2195 bool dc_acquire_release_mpc_3dlut( 2196 struct dc *dc, bool acquire, 2197 struct dc_stream_state *stream, 2198 struct dc_3dlut **lut, 2199 struct dc_transfer_func **shaper) 2200 { 2201 int pipe_idx; 2202 bool ret = false; 2203 bool found_pipe_idx = false; 2204 const struct resource_pool *pool = dc->res_pool; 2205 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2206 int mpcc_id = 0; 2207 2208 if (pool && res_ctx) { 2209 if (acquire) { 2210 /*find pipe idx for the given stream*/ 2211 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2212 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2213 found_pipe_idx = true; 2214 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2215 break; 2216 } 2217 } 2218 } else 2219 found_pipe_idx = true;/*for release pipe_idx is not required*/ 2220 2221 if (found_pipe_idx) { 2222 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2223 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2224 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2225 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2226 } 2227 } 2228 return ret; 2229 } 2230 2231 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2232 { 2233 int i; 2234 struct pipe_ctx *pipe; 2235 2236 for (i = 0; i < MAX_PIPES; i++) { 2237 pipe = &context->res_ctx.pipe_ctx[i]; 2238 2239 // Don't check flip pending on phantom pipes 2240 if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)) 2241 continue; 2242 2243 /* Must set to false to start with, due to OR in update function */ 2244 pipe->plane_state->status.is_flip_pending = false; 2245 dc->hwss.update_pending_status(pipe); 2246 if (pipe->plane_state->status.is_flip_pending) 2247 return true; 2248 } 2249 return false; 2250 } 2251 2252 /* Perform updates here which need to be deferred until next vupdate 2253 * 2254 * 

bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/* find pipe idx for the given stream */
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true; /* for release pipe_idx is not required */

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}

static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// Don't check flip pending on phantom pipes
		if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}

/* Perform updates here which need to be deferred until the next vupdate
 *
 * e.g. blnd lut, 3dlut and shaper lut bypass regs are double buffered
 * but forcing lut memory to shutdown state is immediate. This causes
 * single frame corruption as lut gets disabled mid-frame unless shutdown
 * is deferred until after entering bypass.
 */
static void process_deferred_updates(struct dc *dc)
{
	int i = 0;

	if (dc->debug.enable_mem_low_power.bits.cm) {
		ASSERT(dc->dcn_ip->max_num_dpp);
		for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
	}
}
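
/* Editorial illustration of the deferral above (hypothetical timeline):
 * a LUT bypass enable written during frame N only latches at the next
 * vupdate, while a LUT memory shutdown takes effect immediately; shutting
 * the memory down during frame N would therefore kill the LUT mid-frame.
 * Deferring the shutdown via dpp_deferred_update() until after bypass has
 * latched avoids the single-frame corruption described above.
 */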

void dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
		return;

	post_surface_trace(dc);

	/*
	 * Only relevant for DCN behavior where we can guarantee the optimization
	 * is safe to apply - retain the legacy behavior for DCE.
	 */

	if (dc->ctx->dce_version < DCE_VERSION_MAX)
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	else {
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);

		if (is_flip_pending_in_pipes(dc, context))
			return;

		for (i = 0; i < dc->res_pool->pipe_count; i++)
			if (context->res_ctx.pipe_ctx[i].stream == NULL ||
					context->res_ctx.pipe_ctx[i].plane_state == NULL) {
				context->res_ctx.pipe_ctx[i].pipe_idx = i;
				dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
			}

		process_deferred_updates(dc);

		dc->hwss.optimize_bandwidth(dc, context);

		if (dc->hwss.update_dsc_pg)
			dc->hwss.update_dsc_pg(dc, context, true);
	}

	dc->optimized_required = false;
	dc->wm_optimized_required = false;
}

bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	} else {
		generic = dal_gpio_service_create_generic_mux(
			gpio_service,
			pin_info.offset,
			pin_info.mask);
	}

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		/* During DCC on/off, stutter period is calculated before
		 * DCC has fully transitioned. This results in incorrect
		 * stutter period calculation. Triggering a full update will
		 * recalculate stutter period.
		 */
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: the checks below are HW dependent, we should add a hook
		 * to DCE/N resource and validate it there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc *dc,
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
					u->surface->scaling_quality.integer_scaling) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
			|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
		(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
		 u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
		/* Changing clip size of a large surface may result in MPC slice count change */
		update_flags->bits.bandwidth_change = 1;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
			u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
		update_flags->bits.clip_size_change = 1;

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change ||
			update_flags->bits.clip_size_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;
	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
		update_flags->raw = 0xFFFFFFFF;
		return UPDATE_TYPE_FULL;
	}

	update_flags->raw = 0; // Reset all flags

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(dc, u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr) {
		update_flags->bits.addr_update = 1;
		if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
			update_flags->bits.tmz_changed = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
		}
	}
	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamut_remap_matrix)
		update_flags->bits.gamut_remap_change = 1;

	if (u->blend_tf)
		update_flags->bits.gamma_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (u->lut3d_func || u->func_shaper)
		update_flags->bits.lut_3d = 1;

	if (u->hdr_mult.value)
		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
			update_flags->bits.hdr_mult = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
		}

	if (u->cm2_params) {
		if ((u->cm2_params->component_settings.shaper_3dlut_setting
					!= u->surface->mcm_shaper_3dlut_setting)
				|| (u->cm2_params->component_settings.lut1d_enable
					!= u->surface->mcm_lut1d_enable))
			update_flags->bits.mcm_transfer_function_enable_change = 1;
		if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src
				!= u->surface->mcm_luts.lut3d_data.lut3d_src)
			update_flags->bits.mcm_transfer_function_enable_change = 1;
	}
	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.lut_3d) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}
	if (update_flags->bits.mcm_transfer_function_enable_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	if (dc->debug.enable_legacy_fast_update &&
			(update_flags->bits.gamma_change ||
			update_flags->bits.gamut_remap_change ||
			update_flags->bits.input_csc_change ||
			update_flags->bits.coeff_reduction_change)) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}
	return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (dc->idle_optimizations_allowed)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_update && stream_update->pending_test_pattern)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_update && stream_update->hw_cursor_req)
		overall_type = UPDATE_TYPE_FULL;

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (stream_update->mst_bw_update)
			su_flags->bits.mst_bw = 1;

		if (stream_update->stream->freesync_on_desktop &&
			(stream_update->vrr_infopacket || stream_update->allow_freesync ||
			stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
			su_flags->bits.fams_changed = 1;

		if (stream_update->scaler_sharpener_update)
			su_flags->bits.scaler_sharpener = 1;

		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;

		/* Output transfer function changes do not require bandwidth recalculation,
		 * so don't trigger a full update
		 */
		if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;
	}

	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

/*
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		if (stream_update) {
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fall back to a mem compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}
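
/*
 * Illustrative sketch (editorial): a caller that wants to know whether a
 * set of surface updates can take the fast path might do the following.
 * The stream/status lookup shown here is an assumption about the caller,
 * not a fixed API contract.
 *
 *	enum surface_update_type type;
 *	const struct dc_stream_status *status = dc_stream_get_status(stream);
 *
 *	type = dc_check_update_surfaces_for_stream(dc, updates, update_count,
 *						   stream_update, status);
 *	if (type == UPDATE_TYPE_FULL)
 *		; // full validation and reprogramming required
 */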

static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
			surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	if (srf_update->gamma) {
		memcpy(&surface->gamma_correction.entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction.is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction.num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction.type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func) {
		surface->in_transfer_func.sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func.tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func.type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func.tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (srf_update->func_shaper)
		memcpy(&surface->in_shaper_func, srf_update->func_shaper,
			sizeof(surface->in_shaper_func));

	if (srf_update->lut3d_func)
		memcpy(&surface->lut3d_func, srf_update->lut3d_func,
			sizeof(surface->lut3d_func));

	if (srf_update->hdr_mult.value)
		surface->hdr_mult =
				srf_update->hdr_mult;

	if (srf_update->blend_tf)
		memcpy(&surface->blend_tf, srf_update->blend_tf,
			sizeof(surface->blend_tf));

	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;

	if (srf_update->gamut_remap_matrix)
		surface->gamut_remap_matrix =
			*srf_update->gamut_remap_matrix;
	if (srf_update->cm2_params) {
		surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
		surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
		surface->mcm_luts = srf_update->cm2_params->cm2_luts;
	}
	if (srf_update->cursor_csc_color_matrix)
		surface->cursor_csc_color_matrix =
			*srf_update->cursor_csc_color_matrix;
}

static void copy_stream_update_to_stream(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
		struct dc_stream_update *update)
{
	struct dc_context *dc_ctx = dc->ctx;

	if (update == NULL || stream == NULL)
		return;

	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	if (update->out_transfer_func) {
		stream->out_transfer_func.sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func.tf = update->out_transfer_func->tf;
		stream->out_transfer_func.type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func.tf_pts,
			&update->out_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt)
		stream->periodic_interrupt = *update->periodic_interrupt;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->hw_cursor_req)
		stream->hw_cursor_req = *update->hw_cursor_req;

	if (update->allow_freesync)
		stream->allow_freesync = *update->allow_freesync;

	if (update->vrr_active_variable)
		stream->vrr_active_variable = *update->vrr_active_variable;

	if (update->vrr_active_fixed)
		stream->vrr_active_fixed = *update->vrr_active_fixed;

	if (update->crtc_timing_adjust)
		stream->adjust = *update->crtc_timing_adjust;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->hfvsif_infopacket)
		stream->hfvsif_infopacket = *update->hfvsif_infopacket;

	if (update->vtem_infopacket)
		stream->vtem_infopacket = *update->vtem_infopacket;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->adaptive_sync_infopacket)
		stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;

	if (update->pending_test_pattern)
		stream->test_pattern = *update->pending_test_pattern;
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		/* Use temporary context for validating new DSC config */
		struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);

		if (dsc_validate_context) {
			stream->timing.dsc_cfg = *update->dsc_config;
			stream->timing.flags.DSC = enable_dsc;
			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
				stream->timing.dsc_cfg = old_dsc_cfg;
				stream->timing.flags.DSC = old_dsc_enabled;
				update->dsc_config = NULL;
			}

			dc_state_release(dsc_validate_context);
		} else {
			DC_ERROR("Failed to allocate new validate context for DSC change\n");
			update->dsc_config = NULL;
		}
	}
	if (update->scaler_sharpener_update)
		stream->scaler_sharpener_update = *update->scaler_sharpener_update;
}

static void backup_planes_and_stream_state(
		struct dc_scratch_space *scratch,
		struct dc_stream_state *stream)
{
	int i;
	struct dc_stream_status *status = dc_stream_get_status(stream);

	if (!status)
		return;

	for (i = 0; i < status->plane_count; i++) {
		scratch->plane_states[i] = *status->plane_states[i];
	}
	scratch->stream_state = *stream;
}

static void restore_planes_and_stream_state(
		struct dc_scratch_space *scratch,
		struct dc_stream_state *stream)
{
	int i;
	struct dc_stream_status *status = dc_stream_get_status(stream);

	if (!status)
		return;

	for (i = 0; i < status->plane_count; i++) {
		*status->plane_states[i] = scratch->plane_states[i];
	}
	*stream = scratch->stream_state;
}

/**
 * update_seamless_boot_flags() - Helper function for updating seamless boot flags
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @surface_count: Number of surfaces that have been updated
 * @stream: Corresponding stream to be updated in the current flip
 *
 * Updating the seamless boot flags does not need to be part of the commit
 * sequence. This helper function will update the seamless boot flags on each
 * flip (if required) outside of the HW commit sequence (fast or slow).
 *
 * Return: void
 */
static void update_seamless_boot_flags(struct dc *dc,
		struct dc_state *context,
		int surface_count,
		struct dc_stream_state *stream)
{
	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
		/* Optimize seamless boot flag keeps clocks and watermarks high until
		 * first flip. After first flip, optimization is required to lower
		 * bandwidth. Important to note that it is expected UEFI will
		 * only light up a single display on POST, therefore we only expect
		 * one stream with seamless boot flag set.
		 */
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;

			if (get_seamless_boot_stream_count(context) == 0)
				dc->optimized_required = true;
		}
	}
}

/**
 * update_planes_and_stream_state() - The function takes planes and stream
 * updates as inputs and determines the appropriate update type. If the update
 * type is FULL, the function allocates a new context, then populates and
 * validates it. Otherwise, it updates the current dc context. The function
 * returns both new_context and new_update_type back to the caller. The
 * function also backs up both current and new contexts into the corresponding
 * dc state scratch memory.
 * TODO: The function does too many things, and even conditionally allocates
 * dc context memory implicitly. We should consider breaking it down.
 *
 * @dc: Current DC state
 * @srf_updates: an array of surface updates
 * @surface_count: surface update count
 * @stream: Corresponding stream to be updated
 * @stream_update: stream update
 * @new_update_type: [out] determined update type by the function
 * @new_context: [out] new context allocated and validated if update type is
 * FULL, reference to current context if update type is less than FULL.
 *
 * Return: true if a valid update is populated into new_context, false
 * otherwise.
 */
static bool update_planes_and_stream_state(struct dc *dc,
		struct dc_surface_update *srf_updates, int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type *new_update_type,
		struct dc_state **new_context)
{
	struct dc_state *context;
	int i, j;
	enum surface_update_type update_type;
	const struct dc_stream_status *stream_status;
	struct dc_context *dc_ctx = dc->ctx;

	stream_status = dc_stream_get_status(stream);

	if (!stream_status) {
		if (surface_count) /* Only an error condition if surf_count non-zero */
			ASSERT(false);

		return false; /* Cannot commit surface to stream that is not committed */
	}

	context = dc->current_state;
	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);
	if (update_type == UPDATE_TYPE_FULL)
		backup_planes_and_stream_state(&dc->scratch.current_state, stream);

	/* update current stream with the new updates */
	copy_stream_update_to_stream(dc, context, stream, stream_update);

	/* do not perform surface update if surface has invalid dimensions
	 * (all zero) and no scaling_info is provided
	 */
	if (surface_count > 0) {
		for (i = 0; i < surface_count; i++) {
			if ((srf_updates[i].surface->src_rect.width == 0 ||
				 srf_updates[i].surface->src_rect.height == 0 ||
				 srf_updates[i].surface->dst_rect.width == 0 ||
				 srf_updates[i].surface->dst_rect.height == 0) &&
				(!srf_updates[i].scaling_info ||
				 srf_updates[i].scaling_info->src_rect.width == 0 ||
				 srf_updates[i].scaling_info->src_rect.height == 0 ||
				 srf_updates[i].scaling_info->dst_rect.width == 0 ||
				 srf_updates[i].scaling_info->dst_rect.height == 0)) {
				DC_ERROR("Invalid src/dst rects in surface update!\n");
				return false;
			}
		}
	}

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	for (i = 0; i < surface_count; i++)
		copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);

	if (update_type >= UPDATE_TYPE_FULL) {
		struct dc_plane_state *new_planes[MAX_SURFACES] = {0};

		for (i = 0; i < surface_count; i++)
			new_planes[i] = srf_updates[i].surface;

		/* initialize scratch memory for building context */
		context = dc_state_create_copy(dc->current_state);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return false;
		}

		/* For each full update, remove all existing phantom pipes first.
		 * Ensures that we have enough pipes for newly added MPO planes
		 */
		dc_state_remove_phantom_streams_and_planes(dc, context);
		dc_state_release_phantom_streams_and_planes(dc, context);

		/* remove old surfaces from context */
		if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {

			BREAK_TO_DEBUGGER();
			goto fail;
		}

		/* add surface to context */
		if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {

			BREAK_TO_DEBUGGER();
			goto fail;
		}
	}

	/* save update parameters into surface */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		if (update_type != UPDATE_TYPE_MED)
			continue;
		if (surface->update_flags.bits.clip_size_change ||
				surface->update_flags.bits.position_change) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	if (update_type == UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			BREAK_TO_DEBUGGER();
			goto fail;
		}
	}
	update_seamless_boot_flags(dc, context, surface_count, stream);

	*new_context = context;
	*new_update_type = update_type;
	if (update_type == UPDATE_TYPE_FULL)
		backup_planes_and_stream_state(&dc->scratch.new_state, stream);

	return true;

fail:
	dc_state_release(context);

	return false;

}

static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;

	// Stream updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);

			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket ||
					stream_update->hfvsif_infopacket ||
					stream_update->adaptive_sync_infopacket ||
					stream_update->vtem_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);

				if (dc_is_dp_signal(pipe_ctx->stream->signal))
					dc->link_srv->dp_trace_source_sequence(
							pipe_ctx->stream->link,
							DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
			}

			if (stream_update->hdr_static_metadata &&
					stream->use_dynamic_meta &&
					dc->hwss.set_dmdata_attributes &&
					pipe_ctx->stream->dmdata_address.quad_part != 0)
				dc->hwss.set_dmdata_attributes(pipe_ctx);

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;

				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
						&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
			}

			if (stream_update->cursor_attributes)
				program_cursor_attributes(dc, stream);

			if (stream_update->cursor_position)
				program_cursor_position(dc, stream);

			/* Full fe update */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dsc_config)
				dc->link_srv->update_dsc_config(pipe_ctx);

			if (stream_update->mst_bw_update) {
				if (stream_update->mst_bw_update->is_increase)
					dc->link_srv->increase_mst_payload(pipe_ctx,
							stream_update->mst_bw_update->mst_stream_bw);
				else
					dc->link_srv->reduce_mst_payload(pipe_ctx,
							stream_update->mst_bw_update->mst_stream_bw);
			}

			if (stream_update->pending_test_pattern) {
				/*
				 * Test pattern params depend on ODM topology changes that we
				 * could be applying to the front end. Since at this stage the
				 * front end changes are not yet applied, we can only apply
				 * the test pattern in hw based on the current state, and
				 * populate the final test pattern params in the new state.
				 * If current and new test pattern params differ as a result
				 * of a different ODM topology being used, it will be detected
				 * and handled during the front end programming update.
				 */
				dc->link_srv->dp_set_test_pattern(stream->link,
						stream->test_pattern.type,
						stream->test_pattern.color_space,
						stream->test_pattern.p_link_settings,
						stream->test_pattern.p_custom_pattern,
						stream->test_pattern.cust_pattern_size);
				resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
			}

			if (stream_update->dpms_off) {
				if (*stream_update->dpms_off) {
					dc->link_srv->set_dpms_off(pipe_ctx);
					/* for dpms, keep acquired resources */
					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					dc->optimized_required = true;

				} else {
					if (get_seamless_boot_stream_count(context) == 0)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);
					dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
				}
			} else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
					&& !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
				/*
				 * Workaround for firmware issue in some receivers where they don't pick up
				 * correct output color space unless DP link is disabled/re-enabled
				 */
				dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				bool should_program_abm = true;

				// if otg funcs defined check if blanked before programming
				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						should_program_abm = false;

				if (should_program_abm) {
					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
						dc->hwss.set_abm_immediate_disable(pipe_ctx);
					} else {
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
					}
				}
			}
		}
	}
}

static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
{
	if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
			|| stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
			&& stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (stream->link->replay_settings.config.replay_supported)
		return true;

	if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
		return true;

	return false;
}

void dc_dmub_update_dirty_rect(struct dc *dc,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_surface_update *srf_updates,
		struct dc_state *context)
{
	union dmub_rb_cmd cmd;
	struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
	unsigned int i, j;
	unsigned int panel_inst = 0;

	if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
		return;

	if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
		return;

	memset(&cmd, 0x0, sizeof(cmd));
	cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
	cmd.update_dirty_rect.header.sub_type = 0;
	cmd.update_dirty_rect.header.payload_bytes =
		sizeof(cmd.update_dirty_rect) -
		sizeof(cmd.update_dirty_rect.header);
	update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;
		const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;

		if (!srf_updates[i].surface || !flip_addr)
			continue;
		/* Do not send in immediate flip mode */
		if (srf_updates[i].surface->flip_immediate)
			continue;

		update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
		update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
		memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
				sizeof(flip_addr->dirty_rects));
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;
			if (pipe_ctx->plane_state != plane_state)
				continue;

			update_dirty_rect->panel_inst = panel_inst;
			update_dirty_rect->pipe_idx = j;
			dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
		}
	}
}
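
/* Editorial note: the builder below fills in the same dirty-rect command
 * as dc_dmub_update_dirty_rect() above, but instead of executing it
 * immediately it appends the command to the dc_dmub_cmd array, so the
 * caller can submit the whole batch later while the OTG lock is held.
 */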
3500 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3501 3502 if (!srf_updates[i].surface || !flip_addr) 3503 continue; 3504 /* Do not send in immediate flip mode */ 3505 if (srf_updates[i].surface->flip_immediate) 3506 continue; 3507 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3508 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3509 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3510 sizeof(flip_addr->dirty_rects)); 3511 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3512 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3513 3514 if (pipe_ctx->stream != stream) 3515 continue; 3516 if (pipe_ctx->plane_state != plane_state) 3517 continue; 3518 update_dirty_rect->panel_inst = panel_inst; 3519 update_dirty_rect->pipe_idx = j; 3520 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; 3521 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; 3522 (*dmub_cmd_count)++; 3523 } 3524 } 3525 } 3526 3527 static bool check_address_only_update(union surface_update_flags update_flags) 3528 { 3529 union surface_update_flags addr_only_update_flags; 3530 addr_only_update_flags.raw = 0; 3531 addr_only_update_flags.bits.addr_update = 1; 3532 3533 return update_flags.bits.addr_update && 3534 !(update_flags.raw & ~addr_only_update_flags.raw); 3535 } 3536 3537 /** 3538 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB 3539 * 3540 * @dc: Current DC state 3541 * @srf_updates: Array of surface updates 3542 * @surface_count: Number of surfaces that have an updated 3543 * @stream: Corresponding stream to be updated in the current flip 3544 * @context: New DC state to be programmed 3545 * 3546 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB 3547 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array 3548 * 3549 * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required 3550 * to build an array of commands and have them sent while the OTG lock is acquired. 
3551 * 3552 * Return: void 3553 */ 3554 static void build_dmub_cmd_list(struct dc *dc, 3555 struct dc_surface_update *srf_updates, 3556 int surface_count, 3557 struct dc_stream_state *stream, 3558 struct dc_state *context, 3559 struct dc_dmub_cmd dc_dmub_cmd[], 3560 unsigned int *dmub_cmd_count) 3561 { 3562 // Initialize cmd count to 0 3563 *dmub_cmd_count = 0; 3564 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); 3565 } 3566 3567 static void commit_plane_for_stream_offload_fams2_flip(struct dc *dc, 3568 struct dc_surface_update *srf_updates, 3569 int surface_count, 3570 struct dc_stream_state *stream, 3571 struct dc_state *context) 3572 { 3573 int i, j; 3574 3575 /* update dirty rect for PSR */ 3576 dc_dmub_update_dirty_rect(dc, surface_count, stream, 3577 srf_updates, context); 3578 3579 /* Perform requested Updates */ 3580 for (i = 0; i < surface_count; i++) { 3581 struct dc_plane_state *plane_state = srf_updates[i].surface; 3582 3583 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3584 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3585 3586 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3587 continue; 3588 3589 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3590 continue; 3591 3592 /* update pipe context for plane */ 3593 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 3594 dc->hwss.update_plane_addr(dc, pipe_ctx); 3595 } 3596 } 3597 3598 /* Send commands to DMCUB */ 3599 dc_dmub_srv_fams2_passthrough_flip(dc, 3600 context, 3601 stream, 3602 srf_updates, 3603 surface_count); 3604 } 3605 3606 static void commit_planes_for_stream_fast(struct dc *dc, 3607 struct dc_surface_update *srf_updates, 3608 int surface_count, 3609 struct dc_stream_state *stream, 3610 struct dc_stream_update *stream_update, 3611 enum surface_update_type update_type, 3612 struct dc_state *context) 3613 { 3614 int i, j; 3615 struct pipe_ctx *top_pipe_to_program = NULL; 3616 struct dc_stream_status *stream_status = NULL; 3617 bool should_offload_fams2_flip = false; 3618 3619 if (dc->debug.fams2_config.bits.enable && 3620 dc->debug.fams2_config.bits.enable_offload_flip && 3621 dc_state_is_fams2_in_use(dc, context)) { 3622 /* if not offloading to HWFQ, offload to FAMS2 if needed */ 3623 should_offload_fams2_flip = true; 3624 for (i = 0; i < surface_count; i++) { 3625 if (srf_updates[i].surface && 3626 srf_updates[i].surface->update_flags.raw && 3627 !check_address_only_update(srf_updates[i].surface->update_flags)) { 3628 /* more than address update, need to acquire FAMS2 lock */ 3629 should_offload_fams2_flip = false; 3630 break; 3631 } 3632 } 3633 if (stream_update) { 3634 /* more than address update, need to acquire FAMS2 lock */ 3635 should_offload_fams2_flip = false; 3636 } 3637 } 3638 3639 dc_exit_ips_for_hw_access(dc); 3640 3641 dc_z10_restore(dc); 3642 3643 top_pipe_to_program = resource_get_otg_master_for_stream( 3644 &context->res_ctx, 3645 stream); 3646 3647 if (!top_pipe_to_program) 3648 return; 3649 3650 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3651 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3652 3653 if (pipe->stream && pipe->plane_state) { 3654 if (!dc->debug.using_dml2) 3655 set_p_state_switch_method(dc, context, pipe); 3656 3657 if (dc->debug.visual_confirm) 3658 dc_update_visual_confirm_color(dc, context, pipe); 3659 } 3660 } 3661 3662 for (i = 0; i < surface_count; i++) { 3663 struct dc_plane_state *plane_state = srf_updates[i].surface; 3664 /*set logical flag for 
lock/unlock use*/
3665 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3666 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3667
3668 if (!pipe_ctx->plane_state)
3669 continue;
3670 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3671 continue;
3672 pipe_ctx->plane_state->triplebuffer_flips = false;
3673 if (update_type == UPDATE_TYPE_FAST &&
3674 dc->hwss.program_triplebuffer != NULL &&
3675 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3676 /*triple buffer for VUpdate only*/
3677 pipe_ctx->plane_state->triplebuffer_flips = true;
3678 }
3679 }
3680 }
3681
3682 stream_status = dc_state_get_stream_status(context, stream);
3683
3684 if (should_offload_fams2_flip) {
3685 commit_plane_for_stream_offload_fams2_flip(dc,
3686 srf_updates,
3687 surface_count,
3688 stream,
3689 context);
3690 } else if (stream_status) {
3691 build_dmub_cmd_list(dc,
3692 srf_updates,
3693 surface_count,
3694 stream,
3695 context,
3696 context->dc_dmub_cmd,
3697 &(context->dmub_cmd_count));
3698 hwss_build_fast_sequence(dc,
3699 context->dc_dmub_cmd,
3700 context->dmub_cmd_count,
3701 context->block_sequence,
3702 &(context->block_sequence_steps),
3703 top_pipe_to_program,
3704 stream_status,
3705 context);
3706 hwss_execute_sequence(dc,
3707 context->block_sequence,
3708 context->block_sequence_steps);
3709 }
3710
3711 /* Clear update flags so next flip doesn't have redundant programming
3712 * (if there's no stream update, the update flags are not cleared).
3713 * Surface updates are cleared unconditionally at the beginning of each flip,
3714 * so no need to clear here.
3715 */
3716 if (top_pipe_to_program->stream)
3717 top_pipe_to_program->stream->update_flags.raw = 0;
3718 }
3719
3720 static void commit_planes_for_stream(struct dc *dc,
3721 struct dc_surface_update *srf_updates,
3722 int surface_count,
3723 struct dc_stream_state *stream,
3724 struct dc_stream_update *stream_update,
3725 enum surface_update_type update_type,
3726 struct dc_state *context)
3727 {
3728 int i, j;
3729 struct pipe_ctx *top_pipe_to_program = NULL;
3730 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3731 bool subvp_prev_use = false;
3732 bool subvp_curr_use = false;
3733 uint8_t current_stream_mask = 0;
3734
3735 // Once we apply the new subvp context to hardware it won't be in the
3736 // dc->current_state anymore, so we have to cache it before we apply
3737 // the new SubVP context
3738 subvp_prev_use = false;
3739 dc_exit_ips_for_hw_access(dc);
3740
3741 dc_z10_restore(dc);
3742 if (update_type == UPDATE_TYPE_FULL)
3743 hwss_process_outstanding_hw_updates(dc, dc->current_state);
3744
3745 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3746 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3747
3748 if (pipe->stream && pipe->plane_state) {
3749 if (!dc->debug.using_dml2)
3750 set_p_state_switch_method(dc, context, pipe);
3751
3752 if (dc->debug.visual_confirm)
3753 dc_update_visual_confirm_color(dc, context, pipe);
3754 }
3755 }
3756
3757 if (update_type == UPDATE_TYPE_FULL) {
3758 dc_allow_idle_optimizations(dc, false);
3759
3760 if (get_seamless_boot_stream_count(context) == 0)
3761 dc->hwss.prepare_bandwidth(dc, context);
3762
3763 if (dc->hwss.update_dsc_pg)
3764 dc->hwss.update_dsc_pg(dc, context, false);
3765
3766 context_clock_trace(dc, context);
3767 }
3768
3769 top_pipe_to_program = resource_get_otg_master_for_stream(
3770 &context->res_ctx,
3771 stream);
3772 ASSERT(top_pipe_to_program != NULL);
3773 for (i = 0; i < dc->res_pool->pipe_count; i++)
{ 3774 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3775 3776 // Check old context for SubVP 3777 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 3778 if (subvp_prev_use) 3779 break; 3780 } 3781 3782 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3783 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3784 3785 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 3786 subvp_curr_use = true; 3787 break; 3788 } 3789 } 3790 3791 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 3792 struct pipe_ctx *mpcc_pipe; 3793 struct pipe_ctx *odm_pipe; 3794 3795 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 3796 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 3797 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 3798 } 3799 3800 if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming) 3801 dc->res_pool->funcs->prepare_mcache_programming(dc, context); 3802 3803 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3804 if (top_pipe_to_program && 3805 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3806 if (should_use_dmub_lock(stream->link)) { 3807 union dmub_hw_lock_flags hw_locks = { 0 }; 3808 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3809 3810 hw_locks.bits.lock_dig = 1; 3811 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3812 3813 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3814 true, 3815 &hw_locks, 3816 &inst_flags); 3817 } else 3818 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 3819 top_pipe_to_program->stream_res.tg); 3820 } 3821 3822 if (dc->hwss.wait_for_dcc_meta_propagation) { 3823 dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program); 3824 } 3825 3826 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3827 if (dc->hwss.subvp_pipe_control_lock) 3828 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 3829 3830 if (dc->hwss.fams2_global_control_lock) 3831 dc->hwss.fams2_global_control_lock(dc, context, true); 3832 3833 dc->hwss.interdependent_update_lock(dc, context, true); 3834 } else { 3835 if (dc->hwss.subvp_pipe_control_lock) 3836 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3837 3838 if (dc->hwss.fams2_global_control_lock) 3839 dc->hwss.fams2_global_control_lock(dc, context, true); 3840 3841 /* Lock the top pipe while updating plane addrs, since freesync requires 3842 * plane addr update event triggers to be synchronized. 3843 * top_pipe_to_program is expected to never be NULL 3844 */ 3845 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 3846 } 3847 3848 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 3849 3850 // Stream updates 3851 if (stream_update) 3852 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 3853 3854 if (surface_count == 0) { 3855 /* 3856 * In case of turning off screen, no need to program front end a second time. 3857 * just return after program blank. 
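 * (Programming the context with zero planes blanks the pipe; the unlock,
 * post-unlock front end work and SubVP/FAMS2 lock release below still run
 * before the early return.)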
3858 */
3859 if (dc->hwss.apply_ctx_for_surface)
3860 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3861 if (dc->hwss.program_front_end_for_ctx)
3862 dc->hwss.program_front_end_for_ctx(dc, context);
3863
3864 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3865 dc->hwss.interdependent_update_lock(dc, context, false);
3866 } else {
3867 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3868 }
3869 dc->hwss.post_unlock_program_front_end(dc, context);
3870
3871 if (update_type != UPDATE_TYPE_FAST)
3872 if (dc->hwss.commit_subvp_config)
3873 dc->hwss.commit_subvp_config(dc, context);
3874
3875 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3876 * move the SubVP lock to after the phantom pipes have been setup
3877 */
3878 if (dc->hwss.subvp_pipe_control_lock)
3879 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3880 NULL, subvp_prev_use);
3881
3882 if (dc->hwss.fams2_global_control_lock)
3883 dc->hwss.fams2_global_control_lock(dc, context, false);
3884
3885 return;
3886 }
3887
3888 if (update_type != UPDATE_TYPE_FAST) {
3889 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3890 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3891
3892 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
3893 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
3894 pipe_ctx->stream && pipe_ctx->plane_state) {
3895 /* Only update visual confirm for SUBVP and Mclk switching here.
3896 * The bar appears on all pipes, so we need to update the bar on all displays,
3897 * so the information doesn't get stale.
3898 */
3899 dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
3900 pipe_ctx->plane_res.hubp->inst);
3901 }
3902 }
3903 }
3904
3905 for (i = 0; i < surface_count; i++) {
3906 struct dc_plane_state *plane_state = srf_updates[i].surface;
3907
3908 /*set logical flag for lock/unlock use*/
3909 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3910 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3911 if (!pipe_ctx->plane_state)
3912 continue;
3913 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3914 continue;
3915 pipe_ctx->plane_state->triplebuffer_flips = false;
3916 if (update_type == UPDATE_TYPE_FAST &&
3917 dc->hwss.program_triplebuffer != NULL &&
3918 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3919 /*triple buffer for VUpdate only*/
3920 pipe_ctx->plane_state->triplebuffer_flips = true;
3921 }
3922 }
3923 if (update_type == UPDATE_TYPE_FULL) {
3924 /* force vsync flip when reconfiguring pipes to prevent underflow */
3925 plane_state->flip_immediate = false;
3926 }
3927 }
3928
3929 // Update Type FULL, Surface updates
3930 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3931 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3932
3933 if (!pipe_ctx->top_pipe &&
3934 !pipe_ctx->prev_odm_pipe &&
3935 should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3936 struct dc_stream_status *stream_status = NULL;
3937
3938 if (!pipe_ctx->plane_state)
3939 continue;
3940
3941 /* Full fe update*/
3942 if (update_type == UPDATE_TYPE_FAST)
3943 continue;
3944
3945 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3946
3947 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3948 /*turn off triple buffer for full update*/
3949 dc->hwss.program_triplebuffer(
3950 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3951 }
3952 stream_status =
3953 stream_get_status(context, pipe_ctx->stream);
3954
3955 if
(dc->hwss.apply_ctx_for_surface && stream_status) 3956 dc->hwss.apply_ctx_for_surface( 3957 dc, pipe_ctx->stream, stream_status->plane_count, context); 3958 } 3959 } 3960 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 3961 dc->hwss.program_front_end_for_ctx(dc, context); 3962 if (dc->debug.validate_dml_output) { 3963 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3964 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 3965 if (cur_pipe->stream == NULL) 3966 continue; 3967 3968 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 3969 cur_pipe->plane_res.hubp, dc->ctx, 3970 &context->res_ctx.pipe_ctx[i].rq_regs, 3971 &context->res_ctx.pipe_ctx[i].dlg_regs, 3972 &context->res_ctx.pipe_ctx[i].ttu_regs); 3973 } 3974 } 3975 } 3976 3977 // Update Type FAST, Surface updates 3978 if (update_type == UPDATE_TYPE_FAST) { 3979 if (dc->hwss.set_flip_control_gsl) 3980 for (i = 0; i < surface_count; i++) { 3981 struct dc_plane_state *plane_state = srf_updates[i].surface; 3982 3983 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3984 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3985 3986 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3987 continue; 3988 3989 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3990 continue; 3991 3992 // GSL has to be used for flip immediate 3993 dc->hwss.set_flip_control_gsl(pipe_ctx, 3994 pipe_ctx->plane_state->flip_immediate); 3995 } 3996 } 3997 3998 /* Perform requested Updates */ 3999 for (i = 0; i < surface_count; i++) { 4000 struct dc_plane_state *plane_state = srf_updates[i].surface; 4001 4002 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4003 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4004 4005 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 4006 continue; 4007 4008 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4009 continue; 4010 4011 if (srf_updates[i].cm2_params && 4012 srf_updates[i].cm2_params->cm2_luts.lut3d_data.lut3d_src == 4013 DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM && 4014 srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting == 4015 DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT && 4016 dc->hwss.trigger_3dlut_dma_load) 4017 dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx); 4018 4019 /*program triple buffer after lock based on flip type*/ 4020 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 4021 /*only enable triplebuffer for fast_update*/ 4022 dc->hwss.program_triplebuffer( 4023 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 4024 } 4025 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 4026 dc->hwss.update_plane_addr(dc, pipe_ctx); 4027 } 4028 } 4029 } 4030 4031 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4032 dc->hwss.interdependent_update_lock(dc, context, false); 4033 } else { 4034 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 4035 } 4036 4037 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4038 if (top_pipe_to_program && 4039 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 4040 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4041 top_pipe_to_program->stream_res.tg, 4042 CRTC_STATE_VACTIVE); 4043 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4044 top_pipe_to_program->stream_res.tg, 4045 CRTC_STATE_VBLANK); 4046 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 4047 top_pipe_to_program->stream_res.tg, 4048 CRTC_STATE_VACTIVE); 4049 4050 
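/* Release the double buffer lock the same way it was taken above: through
 * the DMUB HW lock manager when should_use_dmub_lock() says the link needs
 * it, otherwise directly on the OTG.
 */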
if (should_use_dmub_lock(stream->link)) {
4051 union dmub_hw_lock_flags hw_locks = { 0 };
4052 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
4053
4054 hw_locks.bits.lock_dig = 1;
4055 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
4056
4057 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
4058 false,
4059 &hw_locks,
4060 &inst_flags);
4061 } else
4062 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
4063 top_pipe_to_program->stream_res.tg);
4064 }
4065
4066 if (subvp_curr_use) {
4067 /* If enabling subvp or transitioning from subvp->subvp, enable the
4068 * phantom streams before we program front end for the phantom pipes.
4069 */
4070 if (update_type != UPDATE_TYPE_FAST) {
4071 if (dc->hwss.enable_phantom_streams)
4072 dc->hwss.enable_phantom_streams(dc, context);
4073 }
4074 }
4075
4076 if (update_type != UPDATE_TYPE_FAST)
4077 dc->hwss.post_unlock_program_front_end(dc, context);
4078
4079 if (subvp_prev_use && !subvp_curr_use) {
4080 /* If disabling subvp, disable phantom streams after front end
4081 * programming has completed (we turn on phantom OTG in order
4082 * to complete the plane disable for phantom pipes).
4083 */
4084
4085 if (dc->hwss.disable_phantom_streams)
4086 dc->hwss.disable_phantom_streams(dc, context);
4087 }
4088
4089 if (update_type != UPDATE_TYPE_FAST)
4090 if (dc->hwss.commit_subvp_config)
4091 dc->hwss.commit_subvp_config(dc, context);
4092 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
4093 * move the SubVP lock to after the phantom pipes have been setup
4094 */
4095 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
4096 if (dc->hwss.subvp_pipe_control_lock)
4097 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
4098 if (dc->hwss.fams2_global_control_lock)
4099 dc->hwss.fams2_global_control_lock(dc, context, false);
4100 } else {
4101 if (dc->hwss.subvp_pipe_control_lock)
4102 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
4103 if (dc->hwss.fams2_global_control_lock)
4104 dc->hwss.fams2_global_control_lock(dc, context, false);
4105 }
4106
4107 // Fire manual trigger only when bottom plane is flipped
4108 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4109 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4110
4111 if (!pipe_ctx->plane_state)
4112 continue;
4113
4114 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
4115 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
4116 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
4117 pipe_ctx->plane_state->skip_manual_trigger)
4118 continue;
4119
4120 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
4121 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
4122 }
4123
4124 current_stream_mask = get_stream_mask(dc, context);
4125 if (current_stream_mask != context->stream_mask) {
4126 context->stream_mask = current_stream_mask;
4127 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
4128 }
4129 }
4130
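/*
 * Illustrative sketch (not part of the driver): the lock bracket that
 * commit_planes_for_stream() above builds around pipe programming. The
 * helper name example_lock_bracket() is hypothetical; the hwss hooks are
 * the ones used above, each optional per ASIC, hence the NULL checks. The
 * real sequence programs front ends between the two halves and releases
 * the SubVP/FAMS2 locks only after the post-unlock work is done.
 */
static void example_lock_bracket(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master, bool lock_all, bool subvp_prev_use)
{
	/* acquire: SubVP lock, then FAMS2 global lock, then pipe lock(s) */
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, true, lock_all,
				lock_all ? NULL : otg_master, subvp_prev_use);
	if (dc->hwss.fams2_global_control_lock)
		dc->hwss.fams2_global_control_lock(dc, context, true);
	if (lock_all && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, true);
	else
		dc->hwss.pipe_control_lock(dc, otg_master, true);

	/* ... program planes and streams here ... */

	/* release in the same grouping, passing lock = false */
	if (lock_all && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, false);
	else
		dc->hwss.pipe_control_lock(dc, otg_master, false);
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, false, lock_all,
				lock_all ? NULL : otg_master, subvp_prev_use);
	if (dc->hwss.fams2_global_control_lock)
		dc->hwss.fams2_global_control_lock(dc, context, false);
}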
4131 /**
4132 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
4133 *
4134 * @dc: Used to get the current state status
4135 * @stream: Target stream, from which we want to remove the attached planes
4136 * @srf_updates: Array of surface updates
4137 * @surface_count: Number of surface updates
4138 * @is_plane_addition: [out] Set to true if it is a plane addition case
4139 *
4140 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
4141 * the MPO if used simultaneously in some specific configurations (e.g.,
4142 * 4k@144). This function checks if the incoming context requires applying a
4143 * transition state with unnecessary pipe splitting and ODM disabled to
4144 * circumvent our hardware limitations to prevent this edge case. If the OPP
4145 * associated with an MPCC might change due to plane additions, this function
4146 * returns true.
4147 *
4148 * Return:
4149 * Return true if OPP and MPCC might change; otherwise, return false.
4150 */
4151 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
4152 struct dc_stream_state *stream,
4153 struct dc_surface_update *srf_updates,
4154 int surface_count,
4155 bool *is_plane_addition)
4156 {
4157
4158 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
4159 bool force_minimal_pipe_splitting = false;
4160 bool subvp_active = false;
4161 uint32_t i;
4162
4163 *is_plane_addition = false;
4164
4165 if (cur_stream_status &&
4166 dc->current_state->stream_count > 0 &&
4167 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
4168 /* determine if minimal transition is required due to MPC*/
4169 if (surface_count > 0) {
4170 if (cur_stream_status->plane_count > surface_count) {
4171 force_minimal_pipe_splitting = true;
4172 } else if (cur_stream_status->plane_count < surface_count) {
4173 force_minimal_pipe_splitting = true;
4174 *is_plane_addition = true;
4175 }
4176 }
4177 }
4178
4179 if (cur_stream_status &&
4180 dc->current_state->stream_count == 1 &&
4181 dc->debug.enable_single_display_2to1_odm_policy) {
4182 /* determine if minimal transition is required due to dynamic ODM*/
4183 if (surface_count > 0) {
4184 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
4185 force_minimal_pipe_splitting = true;
4186 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
4187 force_minimal_pipe_splitting = true;
4188 *is_plane_addition = true;
4189 }
4190 }
4191 }
4192
4193 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4194 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4195
4196 if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
4197 subvp_active = true;
4198 break;
4199 }
4200 }
4201
4202 /* For SubVP when adding or removing planes we need to add a minimal transition
4203 * (even when disabling all planes). Whenever disabling a phantom pipe, we
4204 * must use the minimal transition path to disable the pipe correctly.
4205 *
4206 * We want to use the minimal transition whenever subvp is active, not only if
4207 * a plane is being added / removed from a subvp stream (an MPO plane can be added
4208 * to a DRR pipe of a SubVP + DRR config, in which case we still want to run through
4209 * a min transition to disable subvp).
4210 */ 4211 if (cur_stream_status && subvp_active) { 4212 /* determine if minimal transition is required due to SubVP*/ 4213 if (cur_stream_status->plane_count > surface_count) { 4214 force_minimal_pipe_splitting = true; 4215 } else if (cur_stream_status->plane_count < surface_count) { 4216 force_minimal_pipe_splitting = true; 4217 *is_plane_addition = true; 4218 } 4219 } 4220 4221 return force_minimal_pipe_splitting; 4222 } 4223 4224 struct pipe_split_policy_backup { 4225 bool dynamic_odm_policy; 4226 bool subvp_policy; 4227 enum pipe_split_policy mpc_policy; 4228 char force_odm[MAX_PIPES]; 4229 }; 4230 4231 static void backup_and_set_minimal_pipe_split_policy(struct dc *dc, 4232 struct dc_state *context, 4233 struct pipe_split_policy_backup *policy) 4234 { 4235 int i; 4236 4237 if (!dc->config.is_vmin_only_asic) { 4238 policy->mpc_policy = dc->debug.pipe_split_policy; 4239 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 4240 } 4241 policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; 4242 dc->debug.enable_single_display_2to1_odm_policy = false; 4243 policy->subvp_policy = dc->debug.force_disable_subvp; 4244 dc->debug.force_disable_subvp = true; 4245 for (i = 0; i < context->stream_count; i++) { 4246 policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments; 4247 if (context->streams[i]->debug.allow_transition_for_forced_odm) 4248 context->streams[i]->debug.force_odm_combine_segments = 0; 4249 } 4250 } 4251 4252 static void restore_minimal_pipe_split_policy(struct dc *dc, 4253 struct dc_state *context, 4254 struct pipe_split_policy_backup *policy) 4255 { 4256 uint8_t i; 4257 4258 if (!dc->config.is_vmin_only_asic) 4259 dc->debug.pipe_split_policy = policy->mpc_policy; 4260 dc->debug.enable_single_display_2to1_odm_policy = 4261 policy->dynamic_odm_policy; 4262 dc->debug.force_disable_subvp = policy->subvp_policy; 4263 for (i = 0; i < context->stream_count; i++) 4264 context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i]; 4265 } 4266 4267 static void release_minimal_transition_state(struct dc *dc, 4268 struct dc_state *minimal_transition_context, 4269 struct dc_state *base_context, 4270 struct pipe_split_policy_backup *policy) 4271 { 4272 restore_minimal_pipe_split_policy(dc, base_context, policy); 4273 dc_state_release(minimal_transition_context); 4274 } 4275 4276 static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context) 4277 { 4278 uint8_t i; 4279 int j; 4280 struct dc_stream_status *stream_status; 4281 4282 for (i = 0; i < context->stream_count; i++) { 4283 stream_status = &context->stream_status[i]; 4284 4285 for (j = 0; j < stream_status->plane_count; j++) 4286 stream_status->plane_states[j]->flip_immediate = false; 4287 } 4288 } 4289 4290 static struct dc_state *create_minimal_transition_state(struct dc *dc, 4291 struct dc_state *base_context, struct pipe_split_policy_backup *policy) 4292 { 4293 struct dc_state *minimal_transition_context = NULL; 4294 4295 minimal_transition_context = dc_state_create_copy(base_context); 4296 if (!minimal_transition_context) 4297 return NULL; 4298 4299 backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); 4300 /* commit minimal state */ 4301 if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { 4302 /* prevent underflow and corruption when reconfiguring pipes */ 4303 force_vsync_flip_in_minimal_transition_context(minimal_transition_context); 4304 } else { 4305 /* 4306 * This should never happen, minimal transition 
state should 4307 * always be validated first before adding pipe split features. 4308 */ 4309 release_minimal_transition_state(dc, minimal_transition_context, base_context, policy); 4310 BREAK_TO_DEBUGGER(); 4311 minimal_transition_context = NULL; 4312 } 4313 return minimal_transition_context; 4314 } 4315 4316 static bool is_pipe_topology_transition_seamless_with_intermediate_step( 4317 struct dc *dc, 4318 struct dc_state *initial_state, 4319 struct dc_state *intermediate_state, 4320 struct dc_state *final_state) 4321 { 4322 return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state, 4323 intermediate_state) && 4324 dc->hwss.is_pipe_topology_transition_seamless(dc, 4325 intermediate_state, final_state); 4326 } 4327 4328 static void swap_and_release_current_context(struct dc *dc, 4329 struct dc_state *new_context, struct dc_stream_state *stream) 4330 { 4331 4332 int i; 4333 struct dc_state *old = dc->current_state; 4334 struct pipe_ctx *pipe_ctx; 4335 4336 /* Since memory free requires elevated IRQ, an interrupt 4337 * request is generated by mem free. If this happens 4338 * between freeing and reassigning the context, our vsync 4339 * interrupt will call into dc and cause a memory 4340 * corruption. Hence, we first reassign the context, 4341 * then free the old context. 4342 */ 4343 dc->current_state = new_context; 4344 dc_state_release(old); 4345 4346 // clear any forced full updates 4347 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4348 pipe_ctx = &new_context->res_ctx.pipe_ctx[i]; 4349 4350 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4351 pipe_ctx->plane_state->force_full_update = false; 4352 } 4353 } 4354 4355 static int initialize_empty_surface_updates( 4356 struct dc_stream_state *stream, 4357 struct dc_surface_update *srf_updates) 4358 { 4359 struct dc_stream_status *status = dc_stream_get_status(stream); 4360 int i; 4361 4362 if (!status) 4363 return 0; 4364 4365 for (i = 0; i < status->plane_count; i++) 4366 srf_updates[i].surface = status->plane_states[i]; 4367 4368 return status->plane_count; 4369 } 4370 4371 static bool commit_minimal_transition_based_on_new_context(struct dc *dc, 4372 struct dc_state *new_context, 4373 struct dc_stream_state *stream, 4374 struct dc_surface_update *srf_updates, 4375 int surface_count) 4376 { 4377 bool success = false; 4378 struct pipe_split_policy_backup policy; 4379 struct dc_state *intermediate_context = 4380 create_minimal_transition_state(dc, new_context, 4381 &policy); 4382 4383 if (intermediate_context) { 4384 if (is_pipe_topology_transition_seamless_with_intermediate_step( 4385 dc, 4386 dc->current_state, 4387 intermediate_context, 4388 new_context)) { 4389 DC_LOG_DC("commit minimal transition state: base = new state\n"); 4390 commit_planes_for_stream(dc, srf_updates, 4391 surface_count, stream, NULL, 4392 UPDATE_TYPE_FULL, intermediate_context); 4393 swap_and_release_current_context( 4394 dc, intermediate_context, stream); 4395 dc_state_retain(dc->current_state); 4396 success = true; 4397 } 4398 release_minimal_transition_state( 4399 dc, intermediate_context, new_context, &policy); 4400 } 4401 return success; 4402 } 4403 4404 static bool commit_minimal_transition_based_on_current_context(struct dc *dc, 4405 struct dc_state *new_context, struct dc_stream_state *stream) 4406 { 4407 bool success = false; 4408 struct pipe_split_policy_backup policy; 4409 struct dc_state *intermediate_context; 4410 struct dc_state *old_current_state = dc->current_state; 4411 struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = 
{0};
4412 int surface_count;
4413
4414 /*
4415 * Both current and new contexts share the same stream and plane state
4416 * pointers. When the new context is validated, stream and planes get
4417 * populated with new updates such as new plane addresses. This makes
4418 * the current context no longer valid because stream and planes are
4419 * modified from the original. We back up the current stream and plane states
4420 * into scratch space whenever we populate a new context, so we can
4421 * restore the original values by calling the restore function now.
4422 * This restores the original stream and plane states associated
4423 * with the current state.
4424 */
4425 restore_planes_and_stream_state(&dc->scratch.current_state, stream);
4426 dc_state_retain(old_current_state);
4427 intermediate_context = create_minimal_transition_state(dc,
4428 old_current_state, &policy);
4429
4430 if (intermediate_context) {
4431 if (is_pipe_topology_transition_seamless_with_intermediate_step(
4432 dc,
4433 dc->current_state,
4434 intermediate_context,
4435 new_context)) {
4436 DC_LOG_DC("commit minimal transition state: base = current state\n");
4437 surface_count = initialize_empty_surface_updates(
4438 stream, srf_updates);
4439 commit_planes_for_stream(dc, srf_updates,
4440 surface_count, stream, NULL,
4441 UPDATE_TYPE_FULL, intermediate_context);
4442 swap_and_release_current_context(
4443 dc, intermediate_context, stream);
4444 dc_state_retain(dc->current_state);
4445 success = true;
4446 }
4447 release_minimal_transition_state(dc, intermediate_context,
4448 old_current_state, &policy);
4449 }
4450 dc_state_release(old_current_state);
4451 /*
4452 * Restore stream and plane states back to the values associated with
4453 * new context.
4454 */
4455 restore_planes_and_stream_state(&dc->scratch.new_state, stream);
4456 return success;
4457 }
4458
4459 /**
4460 * commit_minimal_transition_state_in_dc_update - Commit a minimal state based
4461 * on current or new context
4462 *
4463 * @dc: DC structure, used to get the current state
4464 * @new_context: New context
4465 * @stream: Stream getting the update for the flip
4466 * @srf_updates: Surface updates
4467 * @surface_count: Number of surfaces
4468 *
4469 * The function takes in current state and new state and determines a minimal
4470 * transition state as the intermediate step which could make the transition
4471 * between current and new states seamless. If found, it will commit the minimal
4472 * transition state and update current state to this minimal transition state
4473 * and return true; if not, it will return false.
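 *
 * The intermediate state itself comes from the create/release bracket used
 * by the helpers above (a sketch only; the names are the real static
 * helpers defined earlier in this file, mts/base are hypothetical locals):
 *
 *   struct pipe_split_policy_backup policy;
 *   struct dc_state *mts = create_minimal_transition_state(dc, base, &policy);
 *
 *   if (mts) {
 *       // commit mts / check seamlessness here
 *       release_minimal_transition_state(dc, mts, base, &policy);
 *   }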
4474 * 4475 * Return: 4476 * Return True if the minimal transition succeeded, false otherwise 4477 */ 4478 static bool commit_minimal_transition_state_in_dc_update(struct dc *dc, 4479 struct dc_state *new_context, 4480 struct dc_stream_state *stream, 4481 struct dc_surface_update *srf_updates, 4482 int surface_count) 4483 { 4484 bool success = commit_minimal_transition_based_on_new_context( 4485 dc, new_context, stream, srf_updates, 4486 surface_count); 4487 if (!success) 4488 success = commit_minimal_transition_based_on_current_context(dc, 4489 new_context, stream); 4490 if (!success) 4491 DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n"); 4492 return success; 4493 } 4494 4495 /** 4496 * commit_minimal_transition_state - Create a transition pipe split state 4497 * 4498 * @dc: Used to get the current state status 4499 * @transition_base_context: New transition state 4500 * 4501 * In some specific configurations, such as pipe split on multi-display with 4502 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe 4503 * programming when moving to new planes. To mitigate those types of problems, 4504 * this function adds a transition state that minimizes pipe usage before 4505 * programming the new configuration. When adding a new plane, the current 4506 * state requires the least pipes, so it is applied without splitting. When 4507 * removing a plane, the new state requires the least pipes, so it is applied 4508 * without splitting. 4509 * 4510 * Return: 4511 * Return false if something is wrong in the transition state. 4512 */ 4513 static bool commit_minimal_transition_state(struct dc *dc, 4514 struct dc_state *transition_base_context) 4515 { 4516 struct dc_state *transition_context; 4517 struct pipe_split_policy_backup policy; 4518 enum dc_status ret = DC_ERROR_UNEXPECTED; 4519 unsigned int i, j; 4520 unsigned int pipe_in_use = 0; 4521 bool subvp_in_use = false; 4522 bool odm_in_use = false; 4523 4524 /* check current pipes in use*/ 4525 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4526 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 4527 4528 if (pipe->plane_state) 4529 pipe_in_use++; 4530 } 4531 4532 /* If SubVP is enabled and we are adding or removing planes from any main subvp 4533 * pipe, we must use the minimal transition. 4534 */ 4535 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4536 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4537 4538 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { 4539 subvp_in_use = true; 4540 break; 4541 } 4542 } 4543 4544 /* If ODM is enabled and we are adding or removing planes from any ODM 4545 * pipe, we must use the minimal transition. 4546 */ 4547 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4548 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 4549 4550 if (resource_is_pipe_type(pipe, OTG_MASTER)) { 4551 odm_in_use = resource_get_odm_slice_count(pipe) > 1; 4552 break; 4553 } 4554 } 4555 4556 /* When the OS add a new surface if we have been used all of pipes with odm combine 4557 * and mpc split feature, it need use commit_minimal_transition_state to transition safely. 4558 * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need 4559 * call it again. Otherwise return true to skip. 4560 * 4561 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. 
This is
4562 * especially relevant when entering/exiting MPO while DCN still has enough resources.
4563 */
4564 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
4565 return true;
4566
4567 DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
4568 dc->current_state == transition_base_context ? "current" : "new",
4569 subvp_in_use ? "Subvp In Use" :
4570 odm_in_use ? "ODM in Use" :
4571 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
4572 "Unknown");
4573
4574 dc_state_retain(transition_base_context);
4575 transition_context = create_minimal_transition_state(dc,
4576 transition_base_context, &policy);
4577 if (transition_context) {
4578 ret = dc_commit_state_no_check(dc, transition_context);
4579 release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
4580 }
4581 dc_state_release(transition_base_context);
4582
4583 if (ret != DC_OK) {
4584 /* this should never happen */
4585 BREAK_TO_DEBUGGER();
4586 return false;
4587 }
4588
4589 /* force full surface update */
4590 for (i = 0; i < dc->current_state->stream_count; i++) {
4591 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4592 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4593 }
4594 }
4595
4596 return true;
4597 }
4598
4599 void populate_fast_updates(struct dc_fast_update *fast_update,
4600 struct dc_surface_update *srf_updates,
4601 int surface_count,
4602 struct dc_stream_update *stream_update)
4603 {
4604 int i = 0;
4605
4606 if (stream_update) {
4607 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4608 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4609 } else {
4610 fast_update[0].out_transfer_func = NULL;
4611 fast_update[0].output_csc_transform = NULL;
4612 }
4613
4614 for (i = 0; i < surface_count; i++) {
4615 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4616 fast_update[i].gamma = srf_updates[i].gamma;
4617 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4618 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4619 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4620 fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
4621 }
4622 }
4623
4624 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4625 {
4626 int i;
4627
4628 if (fast_update[0].out_transfer_func ||
4629 fast_update[0].output_csc_transform)
4630 return true;
4631
4632 for (i = 0; i < surface_count; i++) {
4633 if (fast_update[i].flip_addr ||
4634 fast_update[i].gamma ||
4635 fast_update[i].gamut_remap_matrix ||
4636 fast_update[i].input_csc_color_matrix ||
4637 fast_update[i].cursor_csc_color_matrix ||
4638 fast_update[i].coeff_reduction_factor)
4639 return true;
4640 }
4641
4642 return false;
4643 }
4644
4645 bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4646 {
4647 int i;
4648
4649 if (fast_update[0].out_transfer_func ||
4650 fast_update[0].output_csc_transform)
4651 return true;
4652
4653 for (i = 0; i < surface_count; i++) {
4654 if (fast_update[i].input_csc_color_matrix ||
4655 fast_update[i].gamma ||
4656 fast_update[i].gamut_remap_matrix ||
4657 fast_update[i].coeff_reduction_factor ||
4658 fast_update[i].cursor_csc_color_matrix)
4659 return true;
4660 }
4661
4662 return false;
4663 }
4664
4665 static bool full_update_required(struct dc *dc,
4666 struct dc_surface_update *srf_updates,
4667 int surface_count,
4668 struct dc_stream_update *stream_update, 4669 struct dc_stream_state *stream) 4670 { 4671 4672 int i; 4673 struct dc_stream_status *stream_status; 4674 const struct dc_state *context = dc->current_state; 4675 4676 for (i = 0; i < surface_count; i++) { 4677 if (srf_updates && 4678 (srf_updates[i].plane_info || 4679 srf_updates[i].scaling_info || 4680 (srf_updates[i].hdr_mult.value && 4681 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || 4682 srf_updates[i].in_transfer_func || 4683 srf_updates[i].func_shaper || 4684 srf_updates[i].lut3d_func || 4685 srf_updates[i].surface->force_full_update || 4686 (srf_updates[i].flip_addr && 4687 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || 4688 (srf_updates[i].cm2_params && 4689 (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting || 4690 srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) || 4691 !is_surface_in_context(context, srf_updates[i].surface))) 4692 return true; 4693 } 4694 4695 if (stream_update && 4696 (((stream_update->src.height != 0 && stream_update->src.width != 0) || 4697 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 4698 stream_update->integer_scaling_update) || 4699 stream_update->hdr_static_metadata || 4700 stream_update->abm_level || 4701 stream_update->periodic_interrupt || 4702 stream_update->vrr_infopacket || 4703 stream_update->vsc_infopacket || 4704 stream_update->vsp_infopacket || 4705 stream_update->hfvsif_infopacket || 4706 stream_update->vtem_infopacket || 4707 stream_update->adaptive_sync_infopacket || 4708 stream_update->dpms_off || 4709 stream_update->allow_freesync || 4710 stream_update->vrr_active_variable || 4711 stream_update->vrr_active_fixed || 4712 stream_update->gamut_remap || 4713 stream_update->output_color_space || 4714 stream_update->dither_option || 4715 stream_update->wb_update || 4716 stream_update->dsc_config || 4717 stream_update->mst_bw_update || 4718 stream_update->func_shaper || 4719 stream_update->lut3d_func || 4720 stream_update->pending_test_pattern || 4721 stream_update->crtc_timing_adjust || 4722 stream_update->scaler_sharpener_update)) 4723 return true; 4724 4725 if (stream) { 4726 stream_status = dc_stream_get_status(stream); 4727 if (stream_status == NULL || stream_status->plane_count != surface_count) 4728 return true; 4729 } 4730 if (dc->idle_optimizations_allowed) 4731 return true; 4732 4733 return false; 4734 } 4735 4736 static bool fast_update_only(struct dc *dc, 4737 struct dc_fast_update *fast_update, 4738 struct dc_surface_update *srf_updates, 4739 int surface_count, 4740 struct dc_stream_update *stream_update, 4741 struct dc_stream_state *stream) 4742 { 4743 return fast_updates_exist(fast_update, surface_count) 4744 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); 4745 } 4746 4747 static bool update_planes_and_stream_v1(struct dc *dc, 4748 struct dc_surface_update *srf_updates, int surface_count, 4749 struct dc_stream_state *stream, 4750 struct dc_stream_update *stream_update, 4751 struct dc_state *state) 4752 { 4753 const struct dc_stream_status *stream_status; 4754 enum surface_update_type update_type; 4755 struct dc_state *context; 4756 struct dc_context *dc_ctx = dc->ctx; 4757 int i, j; 4758 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 4759 4760 dc_exit_ips_for_hw_access(dc); 4761 4762 populate_fast_updates(fast_update, 
srf_updates, surface_count, stream_update);
4763 stream_status = dc_stream_get_status(stream);
4764 context = dc->current_state;
4765
4766 update_type = dc_check_update_surfaces_for_stream(
4767 dc, srf_updates, surface_count, stream_update, stream_status);
4768
4769 if (update_type >= UPDATE_TYPE_FULL) {
4770
4771 /* initialize scratch memory for building context */
4772 context = dc_state_create_copy(state);
4773 if (context == NULL) {
4774 DC_ERROR("Failed to allocate new validate context!\n");
4775 return false;
4776 }
4777
4778 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4779 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4780 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4781
4782 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4783 new_pipe->plane_state->force_full_update = true;
4784 }
4785 } else if (update_type == UPDATE_TYPE_FAST) {
4786 /*
4787 * Previous frame finished and HW is ready for optimization.
4788 */
4789 dc_post_update_surfaces_to_stream(dc);
4790 }
4791
4792 for (i = 0; i < surface_count; i++) {
4793 struct dc_plane_state *surface = srf_updates[i].surface;
4794
4795 copy_surface_update_to_plane(surface, &srf_updates[i]);
4796
4797 if (update_type >= UPDATE_TYPE_MED) {
4798 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4799 struct pipe_ctx *pipe_ctx =
4800 &context->res_ctx.pipe_ctx[j];
4801
4802 if (pipe_ctx->plane_state != surface)
4803 continue;
4804
4805 resource_build_scaling_params(pipe_ctx);
4806 }
4807 }
4808 }
4809
4810 copy_stream_update_to_stream(dc, context, stream, stream_update);
4811
4812 if (update_type >= UPDATE_TYPE_FULL) {
4813 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4814 DC_ERROR("Mode validation failed for stream update!\n");
4815 dc_state_release(context);
4816 return false;
4817 }
4818 }
4819
4820 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4821
4822 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4823 !dc->debug.enable_legacy_fast_update) {
4824 commit_planes_for_stream_fast(dc,
4825 srf_updates,
4826 surface_count,
4827 stream,
4828 stream_update,
4829 update_type,
4830 context);
4831 } else {
4832 commit_planes_for_stream(
4833 dc,
4834 srf_updates,
4835 surface_count,
4836 stream,
4837 stream_update,
4838 update_type,
4839 context);
4840 }
4841 /* update current_state */
4842 if (dc->current_state != context) {
4843
4844 struct dc_state *old = dc->current_state;
4845
4846 dc->current_state = context;
4847 dc_state_release(old);
4848
4849 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4850 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4851
4852 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4853 pipe_ctx->plane_state->force_full_update = false;
4854 }
4855 }
4856
4857 /* Legacy optimization path for DCE. */
4858 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4859 dc_post_update_surfaces_to_stream(dc);
4860 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4861 }
4862 return true;
4863 }
4864
4865 static bool update_planes_and_stream_v2(struct dc *dc,
4866 struct dc_surface_update *srf_updates, int surface_count,
4867 struct dc_stream_state *stream,
4868 struct dc_stream_update *stream_update)
4869 {
4870 struct dc_state *context;
4871 enum surface_update_type update_type;
4872 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4873
4874 /* In cases where MPO and split or ODM are used, transitions can
4875 * cause underflow.
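 * A typical case is entering or exiting MPO while dynamic ODM or pipe
 * split is active (see could_mpcc_tree_change_for_active_pipes() above).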
Apply stream configuration with minimal pipe 4876 * split first to avoid unsupported transitions for active pipes. 4877 */ 4878 bool force_minimal_pipe_splitting = 0; 4879 bool is_plane_addition = 0; 4880 bool is_fast_update_only; 4881 4882 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 4883 is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, 4884 surface_count, stream_update, stream); 4885 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( 4886 dc, 4887 stream, 4888 srf_updates, 4889 surface_count, 4890 &is_plane_addition); 4891 4892 /* on plane addition, minimal state is the current one */ 4893 if (force_minimal_pipe_splitting && is_plane_addition && 4894 !commit_minimal_transition_state(dc, dc->current_state)) 4895 return false; 4896 4897 if (!update_planes_and_stream_state( 4898 dc, 4899 srf_updates, 4900 surface_count, 4901 stream, 4902 stream_update, 4903 &update_type, 4904 &context)) 4905 return false; 4906 4907 /* on plane removal, minimal state is the new one */ 4908 if (force_minimal_pipe_splitting && !is_plane_addition) { 4909 if (!commit_minimal_transition_state(dc, context)) { 4910 dc_state_release(context); 4911 return false; 4912 } 4913 update_type = UPDATE_TYPE_FULL; 4914 } 4915 4916 if (dc->hwss.is_pipe_topology_transition_seamless && 4917 !dc->hwss.is_pipe_topology_transition_seamless( 4918 dc, dc->current_state, context)) 4919 commit_minimal_transition_state_in_dc_update(dc, context, stream, 4920 srf_updates, surface_count); 4921 4922 if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { 4923 commit_planes_for_stream_fast(dc, 4924 srf_updates, 4925 surface_count, 4926 stream, 4927 stream_update, 4928 update_type, 4929 context); 4930 } else { 4931 if (!stream_update && 4932 dc->hwss.is_pipe_topology_transition_seamless && 4933 !dc->hwss.is_pipe_topology_transition_seamless( 4934 dc, dc->current_state, context)) { 4935 DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); 4936 BREAK_TO_DEBUGGER(); 4937 } 4938 commit_planes_for_stream( 4939 dc, 4940 srf_updates, 4941 surface_count, 4942 stream, 4943 stream_update, 4944 update_type, 4945 context); 4946 } 4947 if (dc->current_state != context) 4948 swap_and_release_current_context(dc, context, stream); 4949 return true; 4950 } 4951 4952 static void commit_planes_and_stream_update_on_current_context(struct dc *dc, 4953 struct dc_surface_update *srf_updates, int surface_count, 4954 struct dc_stream_state *stream, 4955 struct dc_stream_update *stream_update, 4956 enum surface_update_type update_type) 4957 { 4958 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 4959 4960 ASSERT(update_type < UPDATE_TYPE_FULL); 4961 populate_fast_updates(fast_update, srf_updates, surface_count, 4962 stream_update); 4963 if (fast_update_only(dc, fast_update, srf_updates, surface_count, 4964 stream_update, stream) && 4965 !dc->debug.enable_legacy_fast_update) 4966 commit_planes_for_stream_fast(dc, 4967 srf_updates, 4968 surface_count, 4969 stream, 4970 stream_update, 4971 update_type, 4972 dc->current_state); 4973 else 4974 commit_planes_for_stream( 4975 dc, 4976 srf_updates, 4977 surface_count, 4978 stream, 4979 stream_update, 4980 update_type, 4981 dc->current_state); 4982 } 4983 4984 static void commit_planes_and_stream_update_with_new_context(struct dc *dc, 4985 struct dc_surface_update *srf_updates, int surface_count, 4986 struct dc_stream_state *stream, 4987 struct dc_stream_update *stream_update, 4988 enum 
surface_update_type update_type,
4989 struct dc_state *new_context)
4990 {
4991 ASSERT(update_type >= UPDATE_TYPE_FULL);
4992 if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
4993 dc->current_state, new_context))
4994 /*
4995 * It is required by the feature design that all pipe topologies
4996 * using extra free pipes for power saving purposes such as
4997 * dynamic ODM or SubVp shall only be enabled when it can be
4998 * transitioned seamlessly to AND from its minimal transition
4999 * state. A minimal transition state is defined as the same dc
5000 * state but with all power saving features disabled. So it uses
5001 * the minimum pipe topology. When we can't seamlessly
5002 * transition from state A to state B, we will insert the
5003 * minimal transition state A' or B' in between so seamless
5004 * transition between A and B can be made possible.
5005 */
5006 commit_minimal_transition_state_in_dc_update(dc, new_context,
5007 stream, srf_updates, surface_count);
5008
5009 commit_planes_for_stream(
5010 dc,
5011 srf_updates,
5012 surface_count,
5013 stream,
5014 stream_update,
5015 update_type,
5016 new_context);
5017 }
5018
5019 static bool update_planes_and_stream_v3(struct dc *dc,
5020 struct dc_surface_update *srf_updates, int surface_count,
5021 struct dc_stream_state *stream,
5022 struct dc_stream_update *stream_update)
5023 {
5024 struct dc_state *new_context;
5025 enum surface_update_type update_type;
5026
5027 /*
5028 * When this function returns true and new_context is not equal to
5029 * current state, the function allocates and validates a new dc state
5030 * and assigns it to new_context. The function expects that the caller
5031 * is responsible to free this memory when new_context is no longer
5032 * used. We swap current with new context and free current instead. So
5033 * new_context's memory will live until the next full update after it is
5034 * replaced by a newer context. Refer to the use of
5035 * swap_and_release_current_context() below.
5036 */
5037 if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
5038 stream, stream_update, &update_type,
5039 &new_context))
5040 return false;
5041
5042 if (new_context == dc->current_state) {
5043 commit_planes_and_stream_update_on_current_context(dc,
5044 srf_updates, surface_count, stream,
5045 stream_update, update_type);
5046 } else {
5047 commit_planes_and_stream_update_with_new_context(dc,
5048 srf_updates, surface_count, stream,
5049 stream_update, update_type, new_context);
5050 swap_and_release_current_context(dc, new_context, stream);
5051 }
5052
5053 return true;
5054 }
5055
5056 bool dc_update_planes_and_stream(struct dc *dc,
5057 struct dc_surface_update *srf_updates, int surface_count,
5058 struct dc_stream_state *stream,
5059 struct dc_stream_update *stream_update)
5060 {
5061 dc_exit_ips_for_hw_access(dc);
5062 /*
5063 * update_planes_and_stream version 3 separates FULL and FAST updates
5064 * to their own sequences. It aims to clean up frequent checks for
5065 * update type resulting in unnecessary branching in logic flow. It also
5066 * adds a new commit minimal transition sequence, which detects the need
5067 * for minimal transition based on the actual comparison of current and
5068 * new states instead of "predicting" it based on per feature software
5069 * policy, i.e. could_mpcc_tree_change_for_active_pipes. The new commit
5070 * minimal transition sequence is made universal to any power saving
5071 * features that would use extra free pipes such as Dynamic ODM/MPC
5072 * Combine, MPO or SubVp.
Therefore there is no longer a need to 5073 * specially handle compatibility problems with transitions among those 5074 * features as they are now transparent to the new sequence. 5075 */ 5076 if (dc->ctx->dce_version >= DCN_VERSION_4_01) 5077 return update_planes_and_stream_v3(dc, srf_updates, 5078 surface_count, stream, stream_update); 5079 return update_planes_and_stream_v2(dc, srf_updates, 5080 surface_count, stream, stream_update); 5081 } 5082 5083 void dc_commit_updates_for_stream(struct dc *dc, 5084 struct dc_surface_update *srf_updates, 5085 int surface_count, 5086 struct dc_stream_state *stream, 5087 struct dc_stream_update *stream_update, 5088 struct dc_state *state) 5089 { 5090 dc_exit_ips_for_hw_access(dc); 5091 /* TODO: Since change commit sequence can have a huge impact, 5092 * we decided to only enable it for DCN3x. However, as soon as 5093 * we get more confident about this change we'll need to enable 5094 * the new sequence for all ASICs. 5095 */ 5096 if (dc->ctx->dce_version >= DCN_VERSION_4_01) { 5097 update_planes_and_stream_v3(dc, srf_updates, surface_count, 5098 stream, stream_update); 5099 return; 5100 } 5101 if (dc->ctx->dce_version >= DCN_VERSION_3_2) { 5102 update_planes_and_stream_v2(dc, srf_updates, surface_count, 5103 stream, stream_update); 5104 return; 5105 } 5106 update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, 5107 stream_update, state); 5108 } 5109 5110 uint8_t dc_get_current_stream_count(struct dc *dc) 5111 { 5112 return dc->current_state->stream_count; 5113 } 5114 5115 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) 5116 { 5117 if (i < dc->current_state->stream_count) 5118 return dc->current_state->streams[i]; 5119 return NULL; 5120 } 5121 5122 enum dc_irq_source dc_interrupt_to_irq_source( 5123 struct dc *dc, 5124 uint32_t src_id, 5125 uint32_t ext_id) 5126 { 5127 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 5128 } 5129 5130 /* 5131 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source 5132 */ 5133 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 5134 { 5135 5136 if (dc == NULL) 5137 return false; 5138 5139 return dal_irq_service_set(dc->res_pool->irqs, src, enable); 5140 } 5141 5142 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 5143 { 5144 dal_irq_service_ack(dc->res_pool->irqs, src); 5145 } 5146 5147 void dc_power_down_on_boot(struct dc *dc) 5148 { 5149 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 5150 dc->hwss.power_down_on_boot) { 5151 if (dc->caps.ips_support) 5152 dc_exit_ips_for_hw_access(dc); 5153 dc->hwss.power_down_on_boot(dc); 5154 } 5155 } 5156 5157 void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) 5158 { 5159 if (!dc->current_state) 5160 return; 5161 5162 switch (power_state) { 5163 case DC_ACPI_CM_POWER_STATE_D0: 5164 dc_state_construct(dc, dc->current_state); 5165 5166 dc_exit_ips_for_hw_access(dc); 5167 5168 dc_z10_restore(dc); 5169 5170 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); 5171 5172 dc->hwss.init_hw(dc); 5173 5174 if (dc->hwss.init_sys_ctx != NULL && 5175 dc->vm_pa_config.valid) { 5176 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); 5177 } 5178 5179 break; 5180 default: 5181 ASSERT(dc->current_state->stream_count == 0); 5182 5183 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); 5184 5185 dc_state_destruct(dc->current_state); 5186 5187 break; 5188 } 5189 } 5190 5191 void dc_resume(struct dc *dc) 5192 { 5193 uint32_t 
void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		dc->link_srv->resume(dc->links[i]);
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}

/* enable/disable eDP PSR without specifying a stream for eDP */
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
	int i;
	bool allow_active;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		struct dc_link *link;
		struct dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->psr_settings.psr_feature_enabled) {
			if (enable && !link->psr_settings.psr_allow_active) {
				allow_active = true;
				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
					return false;
			} else if (!enable && link->psr_settings.psr_allow_active) {
				allow_active = false;
				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
					return false;
			}
		}
	}

	return true;
}

/* enable/disable eDP Replay without specifying a stream for eDP */
bool dc_set_replay_allow_active(struct dc *dc, bool active)
{
	int i;
	bool allow_active;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		struct dc_link *link;
		struct dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->replay_settings.replay_feature_enabled) {
			if (active && !link->replay_settings.replay_allow_active) {
				allow_active = true;
				if (!dc_link_set_replay_allow_active(link, &allow_active,
						false, false, NULL))
					return false;
			} else if (!active && link->replay_settings.replay_allow_active) {
				allow_active = false;
				if (!dc_link_set_replay_allow_active(link, &allow_active,
						true, false, NULL))
					return false;
			}
		}
	}

	return true;
}
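
/*
 * Illustrative sketch, not part of the driver: a DM debugfs hook or
 * suspend path might force both features inactive before touching the
 * hardware. Error handling here is reduced to logging for brevity.
 *
 *	if (!dc_set_psr_allow_active(dc, false))
 *		DC_LOG_DC("failed to make PSR inactive\n");
 *	if (!dc_set_replay_allow_active(dc, false))
 *		DC_LOG_DC("failed to make Replay inactive\n");
 */
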
/* set IPS disable state */
bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips)
{
	dc_exit_ips_for_hw_access(dc);

	dc->config.disable_ips = disable_ips;

	return true;
}

void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
	if (dc->debug.disable_idle_power_optimizations)
		return;

	if (allow != dc->idle_optimizations_allowed)
		DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
			   dc->idle_optimizations_allowed, allow, caller_name);

	if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
		return;

	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
			return;

	if (allow == dc->idle_optimizations_allowed)
		return;

	if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL &&
	    dc->hwss.apply_idle_power_optimizations(dc, allow))
		dc->idle_optimizations_allowed = allow;
}

void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
{
	if (dc->caps.ips_support)
		dc_allow_idle_optimizations_internal(dc, false, caller_name);
}

bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
	if (dc->debug.disable_idle_power_optimizations)
		return false;

	if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
		return false;

	if (!dc->ctx->dmub_srv)
		return false;

	return dc->ctx->dmub_srv->idle_allowed;
}

/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

/* set min memory clock to the min required for current mode, max to maxDPM */
void dc_lock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
		dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}
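
/*
 * Illustrative sketch, not part of the driver: a caller that needs a
 * stable memory clock for a measurement could bracket the work with the
 * two helpers above; run_sensitive_workload() is a hypothetical stand-in.
 *
 *	dc_lock_memory_clock_frequency(dc);	// pin memclk for current mode
 *	run_sensitive_workload();		// hypothetical
 *	dc_unlock_memory_clock_frequency(dc);	// restore full DPM range
 */
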
static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
{
	struct dc_state *context = dc->current_state;
	struct hubp *hubp;
	struct pipe_ctx *pipe;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, true);

			// wait for double buffer
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, true);
		}
	}

	if (dc->clk_mgr->funcs->set_max_memclk)
		dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
	if (dc->clk_mgr->funcs->set_min_memclk)
		dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, false);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, false);
		}
	}
}

/**
 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
 * @dc: pointer to dc of the dm calling this
 * @enable: true = transition to DC mode, false = transition back to AC mode
 *
 * Some SoCs define additional clock limits when in DC mode. The DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply/unapply the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through the DM interface, or manually by escape
 * calls. The caller needs to hold the dc lock when doing so.
 *
 * Return: none (void function)
 *
 */
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
	unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
	bool p_state_change_support;

	if (!dc->config.dc_mode_clk_limit_support)
		return;

	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
	for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
		if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
			maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
	}
	/* khz -> mhz, rounded up */
	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;

	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, softMax);
			// else: No-Op
		}
	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, maxDPM);
			// else: No-Op
		}
	}
	dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
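
/*
 * Illustrative sketch, not part of the driver: on an AC/DC power-source
 * event the DM would forward the new state while holding the dc lock, as
 * required by the Context note above. The mutex name is a hypothetical
 * DM-side lock.
 *
 *	mutex_lock(&dm->dc_lock);		// hypothetical DM lock
 *	dc_enable_dcmode_clk_limit(dc, on_battery);
 *	mutex_unlock(&dm->dc_lock);
 */
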
bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
		unsigned int pitch,
		unsigned int height,
		enum surface_pixel_format format,
		struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
		return true;
	return false;
}

/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
	dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);

	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}

void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
{
	if (dc->current_state)
		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}

/**
 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
 *
 * @dc: [in] dc structure
 *
 * Checks whether DMUB FW supports outbox notifications. If supported, the DM
 * should register the outbox interrupt prior to actually enabling interrupts
 * via dc_enable_dmub_outbox.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
	if (!dc->caps.dmcub_support)
		return false;

	switch (dc->ctx->asic_id.chip_family) {
	case FAMILY_YELLOW_CARP:
		/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
		if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
		    !dc->debug.dpia_debug.bits.disable_dpia)
			return true;
		break;

	case AMDGPU_FAMILY_GC_11_0_1:
	case AMDGPU_FAMILY_GC_11_5_0:
		if (!dc->debug.dpia_debug.bits.disable_dpia)
			return true;
		break;

	default:
		break;
	}

	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}

/**
 * dc_enable_dmub_notifications - Check if dmub fw supports outbox
 *
 * @dc: [in] dc structure
 *
 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported; this
 * API shall be removed after the switch is complete.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
	return dc_is_dmub_outbox_supported(dc);
}

/**
 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
 *
 * @dc: [in] dc structure
 *
 * Enables DMUB unsolicited notifications to x86 via outbox.
 */
void dc_enable_dmub_outbox(struct dc *dc)
{
	struct dc_context *dc_ctx = dc->ctx;

	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
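
/*
 * Illustrative sketch, not part of the driver: the bring-up order implied
 * by the kernel-doc above is support check, outbox IRQ registration, then
 * enable. register_outbox_irq() and adev are hypothetical DM-side names.
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		register_outbox_irq(adev);	// hypothetical
 *		dc_enable_dmub_outbox(dc);
 *	}
 */
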
/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 *
 * @dc: dc structure
 * @link_index: link index
 * @payload: aux payload
 *
 * Sets the port index appropriately for legacy DDC.
 *
 * Return: True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
		uint32_t link_index,
		struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};

	/* AUX transactions carry at most 16 bytes of data */
	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	/* For dpia, ddc_pin is set to NULL */
	if (!dc->links[link_index]->ddc->ddc_pin)
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
	else
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;

	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

	/* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
		       payload->data,
		       payload->length);
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
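
/*
 * Illustrative sketch, not part of the driver: a native-AUX DPCD read of
 * one byte submitted through the DMUB path. The reply arrives later as an
 * outbox notification, not as a return value here. Field values are
 * examples only.
 *
 *	struct aux_payload payload = {
 *		.address = 0x0,		// DPCD_REV, for example
 *		.length = 1,
 *		.write = false,
 *		.i2c_over_aux = false,
 *	};
 *
 *	dc_process_dmub_aux_transfer_async(dc, link->link_index, &payload);
 */
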
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
		uint8_t dpia_port_index)
{
	uint8_t index, link_index = 0xFF;

	for (index = 0; index < dc->link_count; index++) {
		/* ddc_hw_inst has dpia port index for dpia links
		 * and ddc instance for legacy links
		 */
		if (!dc->links[index]->ddc->ddc_pin) {
			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
				link_index = index;
				break;
			}
		}
	}
	ASSERT(link_index != 0xFF);
	return link_index;
}

/**
 * dc_process_dmub_set_config_async - Submits set_config command
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] aux payload
 * @notify: [out] set_config immediate reply
 *
 * Submits set_config command to dmub via inbox message.
 *
 * Return:
 * True if successful, False if failure
 */
bool dc_process_dmub_set_config_async(struct dc *dc,
		uint32_t link_index,
		struct set_config_cmd_payload *payload,
		struct dmub_notification *notify)
{
	union dmub_rb_cmd cmd = {0};
	bool is_cmd_complete = true;

	/* prepare SET_CONFIG command */
	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;

	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;

	if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
		/* command is not processed by dmub */
		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
		return is_cmd_complete;
	}

	/* command processed by dmub, if ret_status is 1, it is completed instantly */
	if (cmd.set_config_access.header.ret_status == 1)
		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
	else
		/* cmd pending, will receive notification via outbox */
		is_cmd_complete = false;

	return is_cmd_complete;
}

/**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @mst_alloc_slots: [in] mst slots to be allotted
 * @mst_slots_in_use: [out] mst slots in use returned in failure case
 *
 * Submits mst slot allocation command to dmub via inbox message
 *
 * Return:
 * DC_OK if successful, DC_ERROR if failure
 */
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
		uint32_t link_index,
		uint8_t mst_alloc_slots,
		uint8_t *mst_slots_in_use)
{
	union dmub_rb_cmd cmd = {0};

	/* prepare MST_ALLOC_SLOTS command */
	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;

	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;

	if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		/* command is not processed by dmub */
		return DC_ERROR_UNEXPECTED;

	/* command processed by dmub; a ret_status other than 1 indicates a
	 * processing error (the header is shared across the dmub_rb_cmd union)
	 */
	if (cmd.set_config_access.header.ret_status != 1)
		return DC_ERROR_UNEXPECTED;

	/* command processed and we have a status of 2, mst not enabled in dpia */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
		return DC_FAIL_UNSUPPORTED_1;

	/* previously configured mst alloc and used slots did not match */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
		return DC_NOT_SUPPORTED;
	}

	return DC_OK;
}
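
/*
 * Illustrative sketch, not part of the driver: how a caller might map the
 * status codes above. req_slots and the logging are hypothetical.
 *
 *	uint8_t slots_in_use;
 *	enum dc_status status = dc_process_dmub_set_mst_slots(dc,
 *			link->link_index, req_slots, &slots_in_use);
 *
 *	if (status == DC_FAIL_UNSUPPORTED_1)
 *		DC_LOG_DC("MST not enabled in DPIA\n");
 *	else if (status == DC_NOT_SUPPORTED)
 *		DC_LOG_DC("slot mismatch, %d slots in use\n", slots_in_use);
 */
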
/**
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
 *
 * @dc: [in] dc structure
 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
 *
 * Submits dpia hpd int enable command to dmub via inbox message
 */
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
		uint32_t hpd_int_enable)
{
	union dmub_rb_cmd cmd = {0};

	cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
	cmd.dpia_hpd_int_enable.enable = hpd_int_enable;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}

/**
 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
 *
 * @dc: [in] dc structure
 */
void dc_print_dmub_diagnostic_data(const struct dc *dc)
{
	dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
}

/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}

/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	if (link->psr_settings.psr_feature_enabled)
		return;

	if (link->replay_settings.replay_feature_enabled)
		return;

	/* find primary pipe associated with stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return;
	}

	dc_get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++) {
		if (edp_links[i] == link)
			break;
	}

	if (i == edp_num)
		return;

	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}

/*****************************************************************************
 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
 * ABM
 * @dc: dc structure
 * @stream: stream whose ABM state is saved or restored
 * @pData: abm hw states
 *
 ****************************************************************************/
bool dc_abm_save_restore(
		struct dc *dc,
		struct dc_stream_state *stream,
		struct abm_save_restore *pData)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	if (link->replay_settings.replay_feature_enabled)
		return false;

	/* find primary pipe associated with stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return false;
	}

	dc_get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++)
		if (edp_links[i] == link)
			break;

	if (i == edp_num)
		return false;

	if (pipe->stream_res.abm &&
	    pipe->stream_res.abm->funcs->save_restore)
		return pipe->stream_res.abm->funcs->save_restore(
				pipe->stream_res.abm,
				i,
				pData);
	return false;
}

void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
	unsigned int i;
	bool subvp_sw_cursor_req = false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
			subvp_sw_cursor_req = true;
			break;
		}
	}
	properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
}
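
/*
 * Illustrative sketch, not part of the driver: a DM could use the query
 * above to clamp the cursor plane it exposes. The struct is filled in by
 * dc_query_current_properties().
 *
 *	struct dc_current_properties props = {0};
 *
 *	dc_query_current_properties(dc, &props);
 *	// expose at most props.cursor_size_limit (64 when subvp forces the
 *	// sw-cursor fallback, otherwise dc->caps.max_cursor_size)
 */
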
5915 * 5916 * @dc: Current DC state 5917 * @edp_link: a link with eDP connector signal type 5918 * @powerOn: power on/off eDP 5919 * 5920 * Return: void 5921 */ 5922 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, 5923 bool powerOn) 5924 { 5925 if (edp_link->connector_signal != SIGNAL_TYPE_EDP) 5926 return; 5927 5928 if (edp_link->skip_implict_edp_power_control == false) 5929 return; 5930 5931 edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); 5932 } 5933 5934 /* 5935 ***************************************************************************** 5936 * dc_get_power_profile_for_dc_state() - extracts power profile from dc state 5937 * 5938 * Called when DM wants to make power policy decisions based on dc_state 5939 * 5940 ***************************************************************************** 5941 */ 5942 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context) 5943 { 5944 struct dc_power_profile profile = { 0 }; 5945 5946 profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support; 5947 5948 return profile; 5949 } 5950