/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
#define GPINT_RETRY_NUM 20

#define MAX_WAIT_US 100000

static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
				  struct dmub_srv *dmub)
{
	dc_srv->dmub = dmub;
	dc_srv->ctx = dc->ctx;
}

static void dc_dmub_srv_handle_failure(struct dc_dmub_srv *dc_dmub_srv)
{
	dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	if (dc_dmub_srv->ctx->dc->debug.enable_dmu_recovery)
		dm_helpers_dmu_timeout(dc_dmub_srv->ctx);
}

struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
{
	struct dc_dmub_srv *dc_srv =
		kzalloc_obj(struct dc_dmub_srv);

	if (dc_srv == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_dmub_srv_construct(dc_srv, dc, dmub);

	return dc_srv;
}

void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
{
	if (*dmub_srv) {
		kfree(*dmub_srv);
		*dmub_srv = NULL;
	}
}

bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	do {
		status = dmub_srv_wait_for_pending(dmub, MAX_WAIT_US);
	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
		dc_dmub_srv_handle_failure(dc_dmub_srv);
	}

	return status == DMUB_STATUS_OK;
}

void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
		dc_dmub_srv_handle_failure(dc_dmub_srv);
	}
}

void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_wait_for_inbox0_ack(dmub, MAX_WAIT_US);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
		dc_dmub_srv_handle_failure(dc_dmub_srv);
	}
}

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
				 union dmub_inbox0_data_register data)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error sending INBOX0 cmd\n");
		dc_dmub_srv_handle_failure(dc_dmub_srv);
	}
}

static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
						   unsigned int count,
						   union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status = DMUB_STATUS_OK;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		/* confirm no messages pending */
		do {
			status = dmub_srv_wait_for_idle(dmub, MAX_WAIT_US);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		/* queue command */
		if (status == DMUB_STATUS_OK)
			status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);

		/* check for errors */
		if (status != DMUB_STATUS_OK)
			break;
	}

	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_handle_failure(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
						  unsigned int count,
						  union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		// Queue command
		if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
		    dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		} else {
			status = DMUB_STATUS_QUEUE_FULL;
		}

		if (status == DMUB_STATUS_QUEUE_FULL) {
			/* Execute and wait for queue to become empty again. */
			status = dmub_srv_fb_cmd_execute(dmub);
			if (status == DMUB_STATUS_POWER_STATE_D3)
				return false;

			do {
				status = dmub_srv_wait_for_inbox_free(dmub, MAX_WAIT_US, count - i);
			} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

			/* Requeue the command. */
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		}

		if (status != DMUB_STATUS_OK) {
			if (status != DMUB_STATUS_POWER_STATE_D3) {
				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
				dc_dmub_srv_handle_failure(dc_dmub_srv);
			}
			return false;
		}
	}

	status = dmub_srv_fb_cmd_execute(dmub);
	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_handle_failure(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
					unsigned int count,
					union dmub_rb_cmd *cmd_list)
{
	bool res = false;

	if (dc_dmub_srv && dc_dmub_srv->dmub) {
		if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
			res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
		} else {
			res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
		}

		if (res)
			res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
	}

	return res;
}

bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
			       enum dm_dmub_wait_type wait_type,
			       union dmub_rb_cmd *cmd_list)
{
	struct dmub_srv *dmub;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		do {
			status = dmub_srv_wait_for_idle(dmub, MAX_WAIT_US);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
			if (!dmub->debug.timeout_info.timeout_occured) {
				dmub->debug.timeout_info.timeout_occured = true;
				if (cmd_list)
					dmub->debug.timeout_info.timeout_cmd = *cmd_list;
				dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
			}
			dc_dmub_srv_handle_failure(dc_dmub_srv);
			return false;
		}

		// Copy data back from ring buffer into command
		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list)
			dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
	}

	return true;
}

bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
}

bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
	if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
		return false;

	return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}
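
/*
 * Illustrative usage sketch for the submit path above (not itself a caller in
 * this file): a typical caller builds a command on the stack, zeroes it, fills
 * in the header and payload, then submits through dc_dmub_srv_cmd_run(). The
 * command type DMUB_CMD__EXAMPLE below is hypothetical; real callers use the
 * types defined in the dmub command headers.
 *
 *	union dmub_rb_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.cmd_common.header.type = DMUB_CMD__EXAMPLE;     // hypothetical type
 *	cmd.cmd_common.header.payload_bytes = payload_size; // bytes after header
 *	// ...fill in the payload...
 *	dc_dmub_srv_cmd_run(dc->ctx->dmub_srv, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 */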

bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.optimized_init_done;
}

bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
				    unsigned int stream_mask)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
					 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.restore_required;
}

bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;

	return dmub_srv_get_outbox0_msg(dmub, entry);
}

void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
	dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
	cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
	uint8_t pipes = 0;
	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			pipes = i;
	}
	return pipes;
}

static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
						struct pipe_ctx *head_pipe,
						struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
{
	int j;
	int pipe_idx = 0;

	fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];

		if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) {
			fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
		}
	}
	fams_pipe_data->pipe_count = pipe_idx;
}
477 */ 478 stream_status = dc_state_get_stream_status(context, pipe->stream); 479 if (stream_status && !stream_status->fpo_in_use) { 480 cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us; 481 break; 482 } 483 } 484 } 485 486 for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) { 487 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 488 489 if (!resource_is_pipe_type(pipe, OTG_MASTER)) 490 continue; 491 492 stream_status = dc_state_get_stream_status(context, pipe->stream); 493 if (stream_status && stream_status->fpo_in_use) { 494 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 495 uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000; 496 497 config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz; 498 config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz; 499 config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps; 500 config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream); 501 dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]); 502 k++; 503 } 504 } 505 cmd.fw_assisted_mclk_switch.header.payload_bytes = 506 sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header); 507 508 // Send the command to the DMCUB. 509 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 510 511 return true; 512 } 513 514 void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) 515 { 516 union dmub_rb_cmd cmd = { 0 }; 517 518 if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) 519 return; 520 521 memset(&cmd, 0, sizeof(cmd)); 522 523 /* Prepare fw command */ 524 cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS; 525 cmd.query_feature_caps.header.sub_type = 0; 526 cmd.query_feature_caps.header.ret_status = 1; 527 cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data); 528 529 /* If command was processed, copy feature caps to dmub srv */ 530 if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && 531 cmd.query_feature_caps.header.ret_status == 0) { 532 memcpy(&dc_dmub_srv->dmub->feature_caps, 533 &cmd.query_feature_caps.query_feature_caps_data, 534 sizeof(struct dmub_feature_caps)); 535 } 536 } 537 538 void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx) 539 { 540 union dmub_rb_cmd cmd = { 0 }; 541 unsigned int panel_inst = 0; 542 543 if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) && 544 dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE) 545 return; 546 547 memset(&cmd, 0, sizeof(cmd)); 548 549 // Prepare fw command 550 cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR; 551 cmd.visual_confirm_color.header.sub_type = 0; 552 cmd.visual_confirm_color.header.ret_status = 1; 553 cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data); 554 cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst; 555 556 // If command was processed, copy feature caps to dmub srv 557 if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && 558 cmd.visual_confirm_color.header.ret_status == 0) { 559 memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color, 560 &cmd.visual_confirm_color.visual_confirm_color_data, 561 sizeof(struct dmub_visual_confirm_color)); 562 } 563 } 564 565 /** 566 * populate_subvp_cmd_drr_info 

/**
 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * @dc: [in] pointer to dc object
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
 * @context: [in] DC state for access to phantom stream
 *
 * Populate the DMCUB SubVP command with DRR pipe info. All the information
 * required for calculating the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
					struct dc_state *context,
					struct pipe_ctx *subvp_pipe,
					struct pipe_ctx *vblank_pipe,
					struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
	uint16_t drr_frame_us = 0;
	uint16_t min_drr_supported_us = 0;
	uint16_t max_drr_supported_us = 0;
	uint16_t max_drr_vblank_us = 0;
	uint16_t max_drr_mallregion_us = 0;
	uint16_t mall_region_us = 0;
	uint16_t prefetch_us = 0;
	uint16_t subvp_active_us = 0;
	uint16_t drr_active_us = 0;
	uint16_t min_vtotal_supported = 0;
	uint16_t max_vtotal_supported = 0;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
	pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
	pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

	drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
				 (((uint64_t)drr_timing->pix_clk_100hz * 100)));
	// P-State allow width and FW delays already included phantom_timing->v_addressable
	mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
				   (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
	min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
	min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
					 (((uint64_t)drr_timing->h_total * 1000000)));

	prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
				(((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
				    (((uint64_t)main_timing->pix_clk_100hz * 100)));
	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
				  (((uint64_t)drr_timing->pix_clk_100hz * 100)));
	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
				       dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
					 (((uint64_t)drr_timing->h_total * 1000000)));

	/* When calculating the max vtotal supported for SubVP + DRR cases, add
	 * margin due to possible rounding errors (being off by 1 line in the
	 * FW calculation can incorrectly push the P-State switch to wait 1 frame
	 * longer).
	 */
	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;

	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
}
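
/*
 * Worked example of the us <-> vtotal conversion above, using hypothetical
 * numbers (not taken from any real timing): for a DRR display with
 * h_total = 2200 and pix_clk_100hz = 1485000 (148.5 MHz), one line takes
 * 2200 * 1000000 / 148500000 ~= 14.8 us. A min_drr_supported_us of 9000 us
 * then maps to min_vtotal_supported = 9000 / 14.8 ~= 607 lines, which is the
 * same arithmetic as the div64_u64() calls above with the operands arranged
 * to stay in 64-bit integer math.
 */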

/**
 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 * required to calculate the microschedule for SubVP + VBLANK case is stored in
 * the pipe_data (subvp_data and vblank_data). Also check if the VBLANK pipe
 * is a DRR display -- if it is make a call to populate drr_info.
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
						struct dc_state *context,
						union dmub_rb_cmd *cmd,
						struct pipe_ctx *vblank_pipe,
						uint8_t cmd_pipe_index)
{
	uint32_t i;
	struct pipe_ctx *pipe = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
		&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

	// Find the SubVP pipe
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (stream should be same for any pipe splits)
		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
		    !resource_is_pipe_type(pipe, DPP_PIPE))
			continue;

		// Find the SubVP pipe
		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			break;
	}

	pipe_data->mode = VBLANK;
	pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
		vblank_pipe->stream->timing.v_front_porch;
	pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
	pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
	pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
	pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
	pipe_data->pipe_config.vblank_data.vblank_end =
		vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

	if (vblank_pipe->stream->ignore_msa_timing_param &&
	    (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}

/**
 * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the
 * microschedule for both SubVP pipes. In order for this to work correctly, the
 * MALL REGION of both SubVP pipes must start at the same time. This function
 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 * the shorter prefetch so that both MALL REGIONs will start at the same time.
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
						    struct dc_state *context,
						    union dmub_rb_cmd *cmd,
						    struct pipe_ctx *subvp_pipes[])
{
	uint32_t subvp0_prefetch_us = 0;
	uint32_t subvp1_prefetch_us = 0;
	uint32_t prefetch_delta_us = 0;
	struct dc_stream_state *phantom_stream0 = NULL;
	struct dc_stream_state *phantom_stream1 = NULL;
	struct dc_crtc_timing *phantom_timing0 = NULL;
	struct dc_crtc_timing *phantom_timing1 = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
	if (!phantom_stream0)
		return;

	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
	if (!phantom_stream1)
		return;

	phantom_timing0 = &phantom_stream0->timing;
	phantom_timing1 = &phantom_stream1->timing;

	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
					(uint64_t)phantom_timing0->h_total * 1000000),
				       (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
					(uint64_t)phantom_timing1->h_total * 1000000),
				       (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));

	// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
	// should increase its prefetch time to match the other
	if (subvp0_prefetch_us > subvp1_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
		prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
				   ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
				  ((uint64_t)phantom_timing1->h_total * 1000000));

	} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
		prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
				   ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
				  ((uint64_t)phantom_timing0->h_total * 1000000));
	}
}
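
/*
 * Illustrative example of the alignment above, with hypothetical numbers: if
 * phantom pipe 0 needs 120 us of prefetch and phantom pipe 1 needs 100 us,
 * then prefetch_delta_us = 20 us is added on top of the baseline
 * subvp_prefetch_end_to_mall_start_us for pipe 1 and converted to phantom
 * lines with a round-up division, so pipe 1's MALL region start is pushed out
 * to line up with pipe 0's.
 */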

/**
 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 * required to calculate the microschedule for the SubVP pipe is stored in the
 * pipe_data of the DMCUB SubVP command.
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
					 struct dc_state *context,
					 union dmub_rb_cmd *cmd,
					 struct pipe_ctx *subvp_pipe,
					 uint8_t cmd_pipe_index)
{
	uint32_t j;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
		&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->mode = SUBVP;
	pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
	pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
	pipe_data->pipe_config.subvp_data.main_vblank_start =
		main_timing->v_total - main_timing->v_front_porch;
	pipe_data->pipe_config.subvp_data.main_vblank_end =
		main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
	pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
		(subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);

	/* Calculate the scaling factor from the src and dst height.
	 * e.g. If 3840x2160 being downscaled to 1920x1080, the scaling factor is 1/2.
	 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
	 *
	 * Make sure to combine stream and plane scaling together.
	 */
	reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
			&out_num_stream, &out_den_stream);
	reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
			&out_num_plane, &out_den_plane);
	reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
	pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
	pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;

	// Prefetch lines is equal to VACTIVE + BP + VSYNC
	pipe_data->pipe_config.subvp_data.prefetch_lines =
		phantom_timing->v_total - phantom_timing->v_front_porch;

	// Round up
	pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
		div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
			   ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
	pipe_data->pipe_config.subvp_data.processing_delay_lines =
		div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
			   ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

	if (subvp_pipe->bottom_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
	} else if (subvp_pipe->next_odm_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
	} else {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
	}

	// Find phantom pipe index based on phantom stream
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
		    phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
			if (phantom_pipe->bottom_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
			} else if (phantom_pipe->next_odm_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
			} else {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
			}
			break;
		}
	}
}
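
/*
 * The two "Round up" divisions above implement ceil(us * pix_clk / line_time)
 * in pure integer math via the usual (a + b - 1) / b identity. Hypothetical
 * example (numbers invented for illustration): converting 40 us with
 * pix_clk_100hz = 1485000 and h_total = 2200 gives
 * ceil(40 * 148500000 / (2200 * 1000000)) = ceil(2.7) = 3 lines.
 */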

/**
 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @enable: [in] if true enables the pipes population
 *
 * This function loops through each pipe and populates the DMUB SubVP CMD info
 * based on the pipe (e.g. SubVP, VBLANK).
 */
void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
				      struct dc_state *context,
				      bool enable)
{
	uint8_t cmd_pipe_index = 0;
	uint32_t i;
	uint8_t subvp_count = 0;
	union dmub_rb_cmd cmd;
	struct pipe_ctx *subvp_pipes[2];
	uint32_t wm_val_refclk = 0;
	enum mall_stream_type pipe_mall_type;

	memset(&cmd, 0, sizeof(cmd));
	// FW command for SUBVP
	cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
	cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
		sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* For SubVP pipe count, only count the top most (ODM / MPC) pipe */
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
		    resource_is_pipe_type(pipe, DPP_PIPE) &&
		    dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			subvp_pipes[subvp_count++] = pipe;
	}

	if (enable) {
		// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

			if (!pipe->stream)
				continue;

			/* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe.
			 * Any ODM or MPC splits being used in SubVP will be handled internally in
			 * populate_subvp_cmd_pipe_info
			 */
			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
			    resource_is_pipe_type(pipe, DPP_PIPE) &&
			    pipe_mall_type == SUBVP_MAIN) {
				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
				   resource_is_pipe_type(pipe, DPP_PIPE) &&
				   pipe_mall_type == SUBVP_NONE) {
				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
				// we run through DML without calculating "natural" P-state support
				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			}
		}
		if (subvp_count == 2)
			update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);

		cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
		cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;

		// Store the original watermark value for this SubVP config so we can lower it when the
		// MCLK switch starts
		wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
			(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;

		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
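
/*
 * Note on the watermark conversion above: pstate_change_ns is translated from
 * nanoseconds into DCHUB reference clock ticks as ns * refclk_MHz / 1000, then
 * clamped to 0xFFFF. The clamp suggests the cached watermark field is treated
 * as 16 bits on the firmware side; that width is inferred from this code
 * rather than stated by the firmware interface here.
 */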

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;
	return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub);
}

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	uint32_t i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);

	if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv)) {
		DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
		return;
	}

	DC_LOG_DEBUG("DMCUB STATE:");
	DC_LOG_DEBUG("    dmcub_version      : %08x", dc_dmub_srv->dmub->debug.dmcub_version);
	DC_LOG_DEBUG("    scratch  [0]       : %08x", dc_dmub_srv->dmub->debug.scratch[0]);
	DC_LOG_DEBUG("    scratch  [1]       : %08x", dc_dmub_srv->dmub->debug.scratch[1]);
	DC_LOG_DEBUG("    scratch  [2]       : %08x", dc_dmub_srv->dmub->debug.scratch[2]);
	DC_LOG_DEBUG("    scratch  [3]       : %08x", dc_dmub_srv->dmub->debug.scratch[3]);
	DC_LOG_DEBUG("    scratch  [4]       : %08x", dc_dmub_srv->dmub->debug.scratch[4]);
	DC_LOG_DEBUG("    scratch  [5]       : %08x", dc_dmub_srv->dmub->debug.scratch[5]);
	DC_LOG_DEBUG("    scratch  [6]       : %08x", dc_dmub_srv->dmub->debug.scratch[6]);
	DC_LOG_DEBUG("    scratch  [7]       : %08x", dc_dmub_srv->dmub->debug.scratch[7]);
	DC_LOG_DEBUG("    scratch  [8]       : %08x", dc_dmub_srv->dmub->debug.scratch[8]);
	DC_LOG_DEBUG("    scratch  [9]       : %08x", dc_dmub_srv->dmub->debug.scratch[9]);
	DC_LOG_DEBUG("    scratch [10]       : %08x", dc_dmub_srv->dmub->debug.scratch[10]);
	DC_LOG_DEBUG("    scratch [11]       : %08x", dc_dmub_srv->dmub->debug.scratch[11]);
	DC_LOG_DEBUG("    scratch [12]       : %08x", dc_dmub_srv->dmub->debug.scratch[12]);
	DC_LOG_DEBUG("    scratch [13]       : %08x", dc_dmub_srv->dmub->debug.scratch[13]);
	DC_LOG_DEBUG("    scratch [14]       : %08x", dc_dmub_srv->dmub->debug.scratch[14]);
	DC_LOG_DEBUG("    scratch [15]       : %08x", dc_dmub_srv->dmub->debug.scratch[15]);
	for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
		DC_LOG_DEBUG("    pc[%d]              : %08x", i, dc_dmub_srv->dmub->debug.pc[i]);
	DC_LOG_DEBUG("    unk_fault_addr     : %08x", dc_dmub_srv->dmub->debug.undefined_address_fault_addr);
	DC_LOG_DEBUG("    inst_fault_addr    : %08x", dc_dmub_srv->dmub->debug.inst_fetch_fault_addr);
	DC_LOG_DEBUG("    data_fault_addr    : %08x", dc_dmub_srv->dmub->debug.data_write_fault_addr);
	DC_LOG_DEBUG("    inbox1_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_rptr);
	DC_LOG_DEBUG("    inbox1_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_wptr);
	DC_LOG_DEBUG("    inbox1_size        : %08x", dc_dmub_srv->dmub->debug.inbox1_size);
	DC_LOG_DEBUG("    inbox0_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_rptr);
	DC_LOG_DEBUG("    inbox0_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_wptr);
	DC_LOG_DEBUG("    inbox0_size        : %08x", dc_dmub_srv->dmub->debug.inbox0_size);
	DC_LOG_DEBUG("    outbox1_rptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_rptr);
	DC_LOG_DEBUG("    outbox1_wptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_wptr);
	DC_LOG_DEBUG("    outbox1_size       : %08x", dc_dmub_srv->dmub->debug.outbox1_size);
	DC_LOG_DEBUG("    is_enabled         : %d", dc_dmub_srv->dmub->debug.is_dmcub_enabled);
	DC_LOG_DEBUG("    is_soft_reset      : %d", dc_dmub_srv->dmub->debug.is_dmcub_soft_reset);
	DC_LOG_DEBUG("    is_secure_reset    : %d", dc_dmub_srv->dmub->debug.is_dmcub_secure_reset);
	DC_LOG_DEBUG("    is_traceport_en    : %d", dc_dmub_srv->dmub->debug.is_traceport_en);
	DC_LOG_DEBUG("    is_cw0_en          : %d", dc_dmub_srv->dmub->debug.is_cw0_enabled);
	DC_LOG_DEBUG("    is_cw6_en          : %d", dc_dmub_srv->dmub->debug.is_cw6_enabled);
	DC_LOG_DEBUG("    is_pwait           : %d", dc_dmub_srv->dmub->debug.is_pwait);
}

static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->plane_state != NULL) {
		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
		    resource_can_pipe_disable_cursor(pipe_ctx))
			return false;
	}

	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
	     pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
	    pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
		return true;

	return false;
}

static void dc_build_cursor_update_payload0(
	struct pipe_ctx *pipe_ctx, uint8_t p_idx,
	struct dmub_cmd_update_cursor_payload0 *payload)
{
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	unsigned int panel_inst = 0;

	if (dc->config.frame_update_cmd_version2) {
		/* Don't need panel_inst for command version2 */
		payload->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_2;
	} else {
		if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
						pipe_ctx->stream->link, &panel_inst))
			return;
		payload->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_1;
	}

	/* Payload: Cursor Rect is built from position & attribute
	 * x & y are obtained from position
	 */
	payload->cursor_rect.x = hubp->cur_rect.x;
	payload->cursor_rect.y = hubp->cur_rect.y;
	/* w & h are obtained from attribute */
	payload->cursor_rect.width = hubp->cur_rect.w;
	payload->cursor_rect.height = hubp->cur_rect.h;

	payload->enable = hubp->pos.cur_ctl.bits.cur_enable;
	payload->pipe_idx = p_idx;
	payload->panel_inst = panel_inst;
	payload->otg_inst = pipe_ctx->stream_res.tg->inst;
}

static void dc_build_cursor_position_update_payload0(
	struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
	const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl->position_cfg.pHubp.cur_ctl.raw = hubp->pos.cur_ctl.raw;
	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;

	/* dpp */
	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
	pl->position_cfg.pipe_idx = p_idx;
}

static void dc_build_cursor_attribute_update_payload1(
	struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
	const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
	pl_A->aHubp.cur_ctl.raw = hubp->att.cur_ctl.raw;
	pl_A->aHubp.size.raw = hubp->att.size.raw;
	pl_A->aHubp.settings.raw = hubp->att.settings.raw;

	/* dpp */
	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
}

/**
 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command
 *
 * @pCtx: [in] pipe context
 * @pipe_idx: [in] pipe index
 *
 * This function stores the cursor related information and passes it to dmub.
 */
void dc_send_update_cursor_info_to_dmu(
	struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
	union dmub_rb_cmd cmd[2];
	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
		&cmd[0].update_cursor_info.update_cursor_info_data;

	memset(cmd, 0, sizeof(cmd));

	if (!dc_dmub_should_update_cursor_data(pCtx))
		return;
	/*
	 * Since we use multi_cmd_pending for dmub command, the 2nd command is
	 * only assigned to store cursor attributes info. The 1st command can
	 * be viewed as 2 parts: one is for PSR/Replay data, the other is to
	 * store cursor position info.
	 *
	 * The command header type must be the same type if using
	 * multi_cmd_pending. Besides, while processing the 2nd command in the
	 * DMU, the sub type is not used, so it is meaningless to pass a sub
	 * type header with a different type.
	 */

	{
		/* Build Payload#0 Header */
		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[0].update_cursor_info.header.payload_bytes =
			sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd

		/* Prepare Payload */
		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);

		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
							 pCtx->plane_res.hubp, pCtx->plane_res.dpp);
	}
	{
		/* Build Payload#1 Header */
		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command.

		dc_build_cursor_attribute_update_payload1(
			&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
			pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);

		/* Combine the 2nd cmd, update_cursor_info, to DMU */
		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (!srv->hw_funcs.is_psrsu_supported)
		return true;
	return srv->hw_funcs.is_psrsu_supported(srv);
}

void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	DC_LOG_DEBUG("Enabled DPIA trace\n");
}

void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
{
	dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}

void dc_dmub_srv_cursor_offload_init(struct dc *dc)
{
	struct dmub_rb_cmd_cursor_offload_init *init;
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
	union dmub_rb_cmd cmd;

	if (!dc->config.enable_cursor_offload)
		return;

	if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support)
		return;

	if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr)
		return;

	if (!dc_dmub_srv->dmub->cursor_offload_v1)
		return;

	if (!dc_dmub_srv->dmub->shared_state)
		return;

	memset(&cmd, 0, sizeof(cmd));

	init = &cmd.cursor_offload_init;
	init->header.type = DMUB_CMD__CURSOR_OFFLOAD;
	init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT;
	init->header.payload_bytes = sizeof(init->init_data);
	init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr;
	init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	dc_dmub_srv->cursor_offload_enabled = true;
}

void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
					const struct dc_stream_state *stream, bool enable)
{
	struct pipe_ctx const *pipe_ctx;
	struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
	union dmub_rb_cmd cmd;

	if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
		return;

	if (!stream)
		return;

	pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
	if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cntl = &cmd.cursor_offload_stream_ctnl;
	cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
	cntl->header.sub_type =
		enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE;
	cntl->header.payload_bytes = sizeof(cntl->data);

	cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst;
	cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull,
							       stream->timing.pix_clk_100hz / 10));

	cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ?
		stream->adjust.v_total_max :
		stream->timing.v_total;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd,
				     enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
}
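
/*
 * Sanity check on the line-time math above, using hypothetical numbers: with
 * h_total = 2200 and pix_clk_100hz = 1485000 (148.5 MHz), line_time_in_ns =
 * 1 + 2200 * 1000000 / 148500 = 1 + 14814 = 14815 ns, i.e. roughly 14.8 us
 * per line. The "+ 1" rounds the truncating division up so the firmware never
 * underestimates the line time.
 */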

void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe)
{
	struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
	union dmub_rb_cmd cmd;

	if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
		return;

	if (!pipe || !pipe->stream || !pipe->stream_res.tg)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cntl = &cmd.cursor_offload_stream_ctnl;
	cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
	cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM;
	cntl->header.payload_bytes = sizeof(cntl->data);
	cntl->data.otg_inst = pipe->stream_res.tg->inst;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return true;

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return true;

	dc_ctx = dc_dmub_srv->ctx;

	if (wait) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			} while (status != DMUB_STATUS_OK);
		} else {
			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			if (status != DMUB_STATUS_OK) {
				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
				return false;
			}
		}
	} else
		return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);

	return true;
}

static int count_active_streams(const struct dc *dc)
{
	int i, count = 0;

	for (i = 0; i < dc->current_state->stream_count; ++i) {
		struct dc_stream_state *stream = dc->current_state->streams[i];

		if (stream && (!stream->dpms_off || dc->config.disable_ips_in_dpms_off))
			count += 1;
	}

	return count;
}

static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
	volatile const struct dmub_shared_state_ips_fw *ips_fw;
	struct dc_dmub_srv *dc_dmub_srv;
	union dmub_rb_cmd cmd = {0};

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;
	ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;

	memset(&cmd, 0, sizeof(cmd));
	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
	cmd.idle_opt_notify_idle.header.payload_bytes =
		sizeof(cmd.idle_opt_notify_idle) -
		sizeof(cmd.idle_opt_notify_idle.header);

	cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;

	if (dc->work_arounds.skip_psr_ips_crtc_disable)
		cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true;

	if (allow_idle) {
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals new_signals;

		DC_LOG_IPS(
			"%s wait idle (ips1_commit=%u ips2_commit=%u)",
			__func__,
			ips_fw->signals.bits.ips1_commit,
			ips_fw->signals.bits.ips2_commit);

		dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);

		memset(&new_signals, 0, sizeof(new_signals));

		new_signals.bits.allow_idle = 1; /* always set */

		if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
			new_signals.bits.allow_z10 = 1;
			// New in IPSv2.0
			new_signals.bits.allow_ips1z8 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
			// IPSv1.0 only
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
			// IPSv1.0 only
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
			/* TODO: Move this logic out to hwseq */
			if (count_active_streams(dc) == 0) {
				/* IPS2 - Display off */
				new_signals.bits.allow_pg = 1;
				new_signals.bits.allow_ips1 = 1;
				new_signals.bits.allow_ips2 = 1;
				new_signals.bits.allow_z10 = 1;
				// New in IPSv2.0
				new_signals.bits.allow_ips1z8 = 1;
			} else {
				/* RCG only */
				new_signals.bits.allow_pg = 0;
				new_signals.bits.allow_ips1 = 1;
				new_signals.bits.allow_ips2 = 0;
				new_signals.bits.allow_z10 = 0;
			}
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
			new_signals.bits.allow_z10 = 1;
		}
		// Setting RCG allow bits (IPSv2.0)
		if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) {
			new_signals.bits.allow_ips0_rcg = 1;
			new_signals.bits.allow_ips1_rcg = 1;
		} else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) {
			new_signals.bits.allow_ips1_rcg = 1;
		} else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) {
			new_signals.bits.allow_ips0_rcg = 1;
		}
		// IPS dynamic allow bits (IPSv2 change, vpb use case)
		if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) {
			new_signals.bits.allow_dynamic_ips1 = 1;
		} else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) {
			new_signals.bits.allow_dynamic_ips1 = 1;
			new_signals.bits.allow_dynamic_ips1_z8 = 1;
		}
		ips_driver->signals = new_signals;
		dc_dmub_srv->driver_signals = ips_driver->signals;
	}

	DC_LOG_IPS(
		"%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)",
		__func__,
		allow_idle,
		ips_fw->signals.bits.ips1_commit,
		ips_fw->signals.bits.ips2_commit);

	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
	/* We also do not perform a wait since DMCUB could enter idle after the notification. */
	dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);

	/* Register access should stop at this point. */
	if (allow_idle)
		dc_dmub_srv->needs_idle_wake = true;
}
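
/*
 * Quick reference for the disable_ips mapping implemented in
 * dc_dmub_srv_notify_idle() above (allow_idle is always set):
 *
 *   DMUB_IPS_ENABLE / DMUB_IPS_DISABLE_DYNAMIC: pg, ips1, ips2, z10, ips1z8
 *   DMUB_IPS_DISABLE_IPS1:                      ips1 only
 *   DMUB_IPS_DISABLE_IPS2:                      pg, ips1
 *   DMUB_IPS_DISABLE_IPS2_Z10:                  pg, ips1, ips2
 *   DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF:         full set when no active
 *                                               streams, otherwise ips1 only
 *   DMUB_IPS_DISABLE_Z8_RETENTION:              pg, ips1, ips2, z10
 *
 * The RCG and dynamic-IPS1 allow bits are then layered on from
 * disable_ips_rcg and disable_ips_in_vpb independently of the table above.
 */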
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv;
	uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0;

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;

	if (dc->clk_mgr->funcs->exit_low_power_state) {
		volatile const struct dmub_shared_state_ips_fw *ips_fw =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;

		rcg_exit_count = ips_fw->rcg_exit_count;
		ips1_exit_count = ips_fw->ips1_exit_count;
		ips2_exit_count = ips_fw->ips2_exit_count;
		ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count;

		ips_driver->signals.all = 0;
		dc_dmub_srv->driver_signals = ips_driver->signals;

		DC_LOG_IPS(
			"%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)",
			__func__,
			ips_driver->signals.bits.allow_ips1,
			ips_driver->signals.bits.allow_ips2,
			ips_fw->signals.bits.ips1_commit,
			ips_fw->signals.bits.ips2_commit,
			ips_fw->signals.bits.ips1z8_commit,
			ips_fw->rcg_entry_count,
			ips_fw->ips1_entry_count,
			ips_fw->ips2_entry_count,
			ips_fw->ips1_z8ret_entry_count);

		/* Note: register access has technically not resumed for DCN here, but we
		 * need to message PMFW through our standard register interface.
		 */
		dc_dmub_srv->needs_idle_wake = false;
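
		/*
		 * IPS2 exit (IPSv1 only): the block below is a belt-and-braces
		 * sequence. A sketch of the steps it takes:
		 *  1. optionally wait ips2_eval_delay_us for firmware to settle
		 *     its IPS2 entry evaluation,
		 *  2. ask PMFW to exit the low power state,
		 *  3. if firmware had already committed IPS2, wait the
		 *     worst-case entry time (ips2_entry_delay_us) and ask PMFW
		 *     again,
		 *  4. poll until ips2_commit clears, wait for HW power-up, then
		 *     resync the inboxes since DMCUB lost them across IPS2.
		 */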
1495 */ 1496 dc_dmub_srv->needs_idle_wake = false; 1497 1498 if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && 1499 (!dc->debug.optimize_ips_handshake || 1500 ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) { 1501 DC_LOG_IPS( 1502 "wait IPS2 eval (ips1_commit=%u ips2_commit=%u )", 1503 ips_fw->signals.bits.ips1_commit, 1504 ips_fw->signals.bits.ips2_commit); 1505 1506 if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit) 1507 udelay(dc->debug.ips2_eval_delay_us); 1508 1509 DC_LOG_IPS( 1510 "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)", 1511 ips_fw->signals.bits.ips1_commit, 1512 ips_fw->signals.bits.ips2_commit); 1513 1514 // Tell PMFW to exit low power state 1515 dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); 1516 1517 if (ips_fw->signals.bits.ips2_commit) { 1518 1519 DC_LOG_IPS( 1520 "wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)", 1521 ips_fw->signals.bits.ips1_commit, 1522 ips_fw->signals.bits.ips2_commit); 1523 1524 // Wait for IPS2 entry upper bound 1525 udelay(dc->debug.ips2_entry_delay_us); 1526 1527 DC_LOG_IPS( 1528 "exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)", 1529 ips_fw->signals.bits.ips1_commit, 1530 ips_fw->signals.bits.ips2_commit); 1531 1532 dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); 1533 1534 DC_LOG_IPS( 1535 "wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)", 1536 ips_fw->signals.bits.ips1_commit, 1537 ips_fw->signals.bits.ips2_commit); 1538 1539 while (ips_fw->signals.bits.ips2_commit) 1540 udelay(1); 1541 1542 DC_LOG_IPS( 1543 "wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)", 1544 ips_fw->signals.bits.ips1_commit, 1545 ips_fw->signals.bits.ips2_commit); 1546 1547 if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) 1548 ASSERT(0); 1549 1550 DC_LOG_IPS( 1551 "resync inbox1 (ips1_commit=%u ips2_commit=%u)", 1552 ips_fw->signals.bits.ips1_commit, 1553 ips_fw->signals.bits.ips2_commit); 1554 1555 dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub); 1556 } 1557 } 1558 1559 dc_dmub_srv_notify_idle(dc, false); 1560 if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) { 1561 DC_LOG_IPS( 1562 "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)", 1563 ips_fw->signals.bits.ips1_commit, 1564 ips_fw->signals.bits.ips2_commit, 1565 ips_fw->signals.bits.ips1z8_commit); 1566 1567 while (ips_fw->signals.bits.ips1_commit) 1568 udelay(1); 1569 1570 DC_LOG_IPS( 1571 "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)", 1572 ips_fw->signals.bits.ips1_commit, 1573 ips_fw->signals.bits.ips2_commit, 1574 ips_fw->signals.bits.ips1z8_commit); 1575 } 1576 } 1577 1578 if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) 1579 ASSERT(0); 1580 1581 DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)", 1582 __func__, 1583 rcg_exit_count, 1584 ips1_exit_count, 1585 ips2_exit_count, 1586 ips1z8_exit_count); 1587 } 1588 1589 void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state) 1590 { 1591 struct dmub_srv *dmub; 1592 1593 if (!dc_dmub_srv) 1594 return; 1595 1596 dmub = dc_dmub_srv->dmub; 1597 1598 if (power_state == DC_ACPI_CM_POWER_STATE_D0) 1599 dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0); 1600 else 1601 dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3); 1602 } 1603 1604 void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv, 1605 enum dc_acpi_cm_power_state power_state) 1606 { 1607 union dmub_rb_cmd cmd; 1608 1609 if 
void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
					  enum dc_acpi_cm_power_state power_state)
{
	union dmub_rb_cmd cmd;

	if (!dc_dmub_srv)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
	cmd.idle_opt_set_dc_power_state.header.payload_bytes =
		sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);

	if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
	} else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
	} else {
		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
	}

	dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
{
	volatile const struct dmub_shared_state_ips_fw *ips_fw;
	bool reallow_idle = false, should_detect = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (dc_dmub_srv->dmub->shared_state &&
	    dc_dmub_srv->dmub->meta_info.feature_bits.bits.shared_state_link_detection) {
		ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
		return ips_fw->signals.bits.detection_required;
	}

	/* Detection may require reading scratch 0 - exit out of idle prior to the read. */
	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false);
		reallow_idle = true;
	}

	should_detect = dmub_srv_should_detect(dc_dmub_srv->dmub);

	/* Re-enter idle if we're not about to immediately redetect links. */
	if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
	    !dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true);

	return should_detect;
}

void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return;

	allow_idle &= (!dc->debug.ips_disallow_entry);

	if (dc_dmub_srv->idle_allowed == allow_idle)
		return;

	DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);

	/*
	 * Entering a low power state requires a driver notification.
	 * Powering up the hardware requires notifying PMFW and DMCUB.
	 * Clearing the driver idle allow requires a DMCUB command.
	 * DMCUB commands require the DMCUB to be powered up and restored.
	 */

	if (!allow_idle) {
		dc_dmub_srv->idle_exit_counter += 1;

		dc_dmub_srv_exit_low_power_state(dc);
		/*
		 * Idle is considered fully exited only after the sequence above
		 * fully completes. If we have a race of two threads exiting
		 * at the same time then it's safe to perform the sequence
		 * twice as long as we're not re-entering.
		 *
		 * Infinite command submission is avoided by using the
		 * dm_execute_dmub_cmd submission instead of the "wake" helpers.
		 */
		dc_dmub_srv->idle_allowed = false;

		dc_dmub_srv->idle_exit_counter -= 1;
		if (dc_dmub_srv->idle_exit_counter < 0) {
			ASSERT(0);
			dc_dmub_srv->idle_exit_counter = 0;
		}
	} else {
		/* Consider idle as notified prior to the actual submission to
		 * prevent multiple entries.
		 */
		dc_dmub_srv->idle_allowed = true;

		dc_dmub_srv_notify_idle(dc, allow_idle);
	}
}
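
/*
 * The "wake" helpers below wrap command submission with idle exit and
 * re-entry. Typical usage, following the pattern used throughout this file
 * (an illustrative sketch; the command type/sub_type here are examples
 * only, not a real command):
 *
 *   union dmub_rb_cmd cmd;
 *
 *   memset(&cmd, 0, sizeof(cmd));
 *   cmd.cmd_common.header.type = DMUB_CMD__IDLE_OPT;  // example type
 *   if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT))
 *           DC_ERROR("command failed\n");
 */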
1693 */ 1694 dc_dmub_srv->idle_allowed = false; 1695 1696 dc_dmub_srv->idle_exit_counter -= 1; 1697 if (dc_dmub_srv->idle_exit_counter < 0) { 1698 ASSERT(0); 1699 dc_dmub_srv->idle_exit_counter = 0; 1700 } 1701 } else { 1702 /* Consider idle as notified prior to the actual submission to 1703 * prevent multiple entries. */ 1704 dc_dmub_srv->idle_allowed = true; 1705 1706 dc_dmub_srv_notify_idle(dc, allow_idle); 1707 } 1708 } 1709 1710 bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, 1711 enum dm_dmub_wait_type wait_type) 1712 { 1713 return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type); 1714 } 1715 1716 bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, 1717 union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 1718 { 1719 struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; 1720 bool result = false, reallow_idle = false; 1721 1722 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 1723 return false; 1724 1725 if (count == 0) 1726 return true; 1727 1728 if (dc_dmub_srv->idle_allowed) { 1729 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false); 1730 reallow_idle = true; 1731 } 1732 1733 /* 1734 * These may have different implementations in DM, so ensure 1735 * that we guide it to the expected helper. 1736 */ 1737 if (count > 1) 1738 result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type); 1739 else 1740 result = dm_execute_dmub_cmd(ctx, cmd, wait_type); 1741 1742 if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && 1743 !ctx->dc->debug.disable_dmub_reallow_idle) 1744 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); 1745 1746 return result; 1747 } 1748 1749 static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, 1750 uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type) 1751 { 1752 struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; 1753 const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 
static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
				  uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
	enum dmub_status status;

	if (response)
		*response = 0;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
	if (status != DMUB_STATUS_OK) {
		if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
			return true;

		return false;
	}

	if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
		dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);

	return true;
}

bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	bool result = false, reallow_idle = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
		reallow_idle = true;
	}

	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);

	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
	    !ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);

	return result;
}
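
/*
 * Ring-buffer based FAMS2 config: one global command followed by two
 * commands per stream (base state + sub state). All but the last command
 * set header.multi_cmd_pending so the firmware treats the sequence as a
 * single atomic configuration update. Layout for N streams, derived from
 * the code below:
 *
 *   cmd[0]           global config       multi_cmd_pending = 1
 *   cmd[1..N]        stream base states  multi_cmd_pending = 1
 *   cmd[N+1..2N]     stream sub states   multi_cmd_pending = 1 (last = 0)
 */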
static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	uint8_t num_cmds = 1;
	uint32_t i;
	union dmub_rb_cmd cmd[2 * MAX_STREAMS + 1];
	struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;

	memset(cmd, 0, sizeof(union dmub_rb_cmd) * (2 * MAX_STREAMS + 1));

	/* fill in generic command header */
	global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
	global_cmd->header.payload_bytes =
		sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);

	if (enable) {
		/* send global configuration parameters */
		memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config,
				sizeof(struct dmub_cmd_fams2_global_config));

		/* copy static feature configuration overrides */
		global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
		global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
		global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;

		/* construct per-stream configs */
		for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
			struct dmub_rb_cmd_fams2 *stream_base_cmd = &cmd[i+1].fams2_config;
			struct dmub_rb_cmd_fams2 *stream_sub_state_cmd =
				&cmd[i+1+context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config;

			/* configure command header */
			stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
			stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
			stream_base_cmd->header.payload_bytes =
				sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
			stream_base_cmd->header.multi_cmd_pending = 1;
			stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
			stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
			stream_sub_state_cmd->header.payload_bytes =
				sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
			stream_sub_state_cmd->header.multi_cmd_pending = 1;

			/* copy stream static base state */
			memcpy(&stream_base_cmd->config,
					&context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
					sizeof(union dmub_cmd_fams2_config));

			/* copy stream static sub state */
			memcpy(&stream_sub_state_cmd->config,
					&context->bw_ctx.bw.dcn.fams2_stream_sub_params[i],
					sizeof(union dmub_cmd_fams2_config));
		}
	}

	/* apply feature configuration based on current driver state */
	global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
	global_cmd->config.global.features.bits.enable = enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
	global_cmd->config.global.features.bits.enable_ppt_check = dc->debug.fams2_config.bits.enable_ppt_check;

	if (enable) {
		/* set multi pending for global, and unset for last stream cmd */
		global_cmd->header.multi_cmd_pending = 1;
		cmd[2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
		num_cmds += 2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
	}

	dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr;
	union dmub_rb_cmd cmd;
	uint32_t i;

	memset(config, 0, sizeof(*config));
	memset(&cmd, 0, sizeof(cmd));

	cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;

	cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr;
	cmd.ib_fams2_config.ib_data.size = sizeof(*config);

	if (enable) {
		/* send global configuration parameters */
		memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config,
				sizeof(struct dmub_cmd_fams2_global_config));

		/* copy static feature configuration overrides */
		config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
		config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
		config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;

		/* construct per-stream configs */
		for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
			/* copy stream static base state */
			memcpy(&config->stream_v1[i].base,
					&context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
					sizeof(config->stream_v1[i].base));

			/* copy stream static sub-state */
			memcpy(&config->stream_v1[i].sub_state,
					&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i],
					sizeof(config->stream_v1[i].sub_state));
		}
	}

	config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
	config->global.features.bits.enable = enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
	config->global.features.bits.enable_ppt_check = dc->debug.fams2_config.bits.enable_ppt_check;

	dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
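
/*
 * Dispatch between the two configuration paths above: FAMS major version 2
 * uses the ring-buffer encoding, while major version 3 stages the whole
 * config in the GART-backed indirect buffer (ib_mem_gart) and sends a
 * single command carrying only its GPU address and size, regardless of the
 * stream count. Other versions leave the firmware configuration untouched.
 */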
void dc_dmub_srv_fams2_update_config(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	if (dc->debug.fams_version.major == 2)
		dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable);
	if (dc->debug.fams_version.major == 3)
		dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable);
}

void dc_dmub_srv_fams2_drr_update(struct dc *dc,
		uint32_t tg_inst,
		uint32_t vtotal_min,
		uint32_t vtotal_max,
		uint32_t vtotal_mid,
		uint32_t vtotal_mid_frame_num,
		bool program_manual_trigger)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE;
	cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
	cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;

	cmd.fams2_drr_update.header.payload_bytes =
		sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
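
/*
 * Pass a set of surface address updates to the firmware as chained
 * FAMS2_FLIP commands, so the flip is performed by DMCUB rather than by
 * direct register writes. One command is built per surface that carries a
 * flip_addr update; all but the last set multi_cmd_pending, mirroring the
 * chaining used for FAMS2 configuration above.
 */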
void dc_dmub_srv_fams2_passthrough_flip(
	struct dc *dc,
	struct dc_state *state,
	struct dc_stream_state *stream,
	struct dc_surface_update *srf_updates,
	int surface_count)
{
	int plane_index;
	union dmub_rb_cmd cmds[MAX_PLANES];
	struct dc_plane_address *address;
	struct dc_plane_state *plane_state;
	int num_cmds = 0;
	struct dc_stream_status *stream_status = dc_stream_get_status(stream);

	if (surface_count <= 0 || stream_status == NULL)
		return;

	memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES);

	/* build command for each surface update */
	for (plane_index = 0; plane_index < surface_count; plane_index++) {
		plane_state = srf_updates[plane_index].surface;
		address = &plane_state->address;

		/* skip if there is no address update for plane */
		if (!srf_updates[plane_index].flip_addr)
			continue;

		/* build command header */
		cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
		cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
		cmds[num_cmds].fams2_flip.header.payload_bytes =
			sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);

		/* for chaining multiple commands, all but the last command should set this to 1 */
		cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;

		/* set topology info */
		cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state);
		if (stream_status)
			cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst;

		cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate;

		/* build address info for command */
		switch (address->type) {
		case PLN_ADDR_TYPE_GRAPHICS:
			if (address->grph.addr.quad_part == 0) {
				BREAK_TO_DEBUGGER();
				break;
			}

			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
				address->grph.meta_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
				(uint16_t)address->grph.meta_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
				address->grph.addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
				(uint16_t)address->grph.addr.high_part;
			break;
		case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
			if (address->video_progressive.luma_addr.quad_part == 0 ||
			    address->video_progressive.chroma_addr.quad_part == 0) {
				BREAK_TO_DEBUGGER();
				break;
			}

			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
				address->video_progressive.luma_meta_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
				(uint16_t)address->video_progressive.luma_meta_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo =
				address->video_progressive.chroma_meta_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi =
				(uint16_t)address->video_progressive.chroma_meta_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
				address->video_progressive.luma_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
				(uint16_t)address->video_progressive.luma_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo =
				address->video_progressive.chroma_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi =
				(uint16_t)address->video_progressive.chroma_addr.high_part;
			break;
		default:
			// Should never be hit
			BREAK_TO_DEBUGGER();
			break;
		}

		num_cmds++;
	}

	if (num_cmds > 0) {
		cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0;
		dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
	cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
	cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data);

	// only panel_inst=0 is supported at the moment
	cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst;
	cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement;

	if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		return false;

	return true;
}
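
/*
 * Query IPS residency counters. The reply does not fit in the command
 * itself, so the firmware writes it to the driver-allocated scratch
 * framebuffer (scratch_mem_fb): the command carries the buffer's GPU
 * address and size, and a nonzero ret_status indicates the buffer was
 * populated and can be copied back.
 */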
bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst,
		struct dmub_ips_residency_info *driver_info,
		enum ips_residency_mode ips_mode)
{
	union dmub_rb_cmd cmd;
	uint32_t bytes = sizeof(struct dmub_ips_residency_info);

	dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb);
	memset(&cmd, 0, sizeof(cmd));

	cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
	cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
	cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data);

	cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
	cmd.ips_query_residency_info.info_data.size = bytes;
	cmd.ips_query_residency_info.info_data.panel_inst = panel_inst;
	cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode;

	if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) ||
	    cmd.ips_query_residency_info.header.ret_status == 0)
		return false;

	// copy the result to the output since ret_status != 0 means the command returned data
	memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);

	return true;
}

bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	union dmub_rb_cmd cmd;
	enum dm_dmub_wait_type wait_type;
	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
	bool result;

	if (!dc_dmub_srv->dmub->feature_caps.lsdma_support_in_dmu)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG;
	wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;

	lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr;
	lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size;

	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);

	if (!result)
		DC_ERROR("LSDMA Init failed in DMUB\n");

	return result;
}

bool dmub_lsdma_send_linear_copy_command(
	struct dc_dmub_srv *dc_dmub_srv,
	uint64_t src_addr,
	uint64_t dst_addr,
	uint32_t count
)
{
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	union dmub_rb_cmd cmd;
	enum dm_dmub_wait_type wait_type;
	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
	bool result;

	memset(&cmd, 0, sizeof(cmd));

	cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;
	wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;

	lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects bytes to copy minus 1
	lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF;
	lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
	lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
	lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;

	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);

	if (!result)
		DC_ERROR("LSDMA Linear Copy failed in DMUB\n");

	return result;
}
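
/*
 * Illustrative use of the linear copy helper above (a sketch; the
 * addresses are hypothetical GPU-addressable locations, not taken from
 * this driver):
 *
 *   // copy 4 KiB between two GPU-addressable buffers via LSDMA
 *   if (!dmub_lsdma_send_linear_copy_command(dc_dmub_srv,
 *                                            src_gpu_addr, dst_gpu_addr, 4096))
 *           DC_ERROR("copy submission failed\n");
 *
 * Callers pass the byte count directly; the helper applies the
 * controller's count-minus-one encoding internally.
 */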
bool dmub_lsdma_send_linear_sub_window_copy_command(
	struct dc_dmub_srv *dc_dmub_srv,
	struct lsdma_linear_sub_window_copy_params copy_data
)
{
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	union dmub_rb_cmd cmd;
	enum dm_dmub_wait_type wait_type;
	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
	bool result;

	memset(&cmd, 0, sizeof(cmd));

	cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY;
	wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;

	lsdma_data->u.linear_sub_window_copy_data.tmz = copy_data.tmz;
	lsdma_data->u.linear_sub_window_copy_data.element_size = copy_data.element_size;
	lsdma_data->u.linear_sub_window_copy_data.src_lo = copy_data.src_lo;
	lsdma_data->u.linear_sub_window_copy_data.src_hi = copy_data.src_hi;
	lsdma_data->u.linear_sub_window_copy_data.src_x = copy_data.src_x;
	lsdma_data->u.linear_sub_window_copy_data.src_y = copy_data.src_y;
	lsdma_data->u.linear_sub_window_copy_data.src_pitch = copy_data.src_pitch;
	lsdma_data->u.linear_sub_window_copy_data.src_slice_pitch = copy_data.src_slice_pitch;
	lsdma_data->u.linear_sub_window_copy_data.dst_lo = copy_data.dst_lo;
	lsdma_data->u.linear_sub_window_copy_data.dst_hi = copy_data.dst_hi;
	lsdma_data->u.linear_sub_window_copy_data.dst_x = copy_data.dst_x;
	lsdma_data->u.linear_sub_window_copy_data.dst_y = copy_data.dst_y;
	lsdma_data->u.linear_sub_window_copy_data.dst_pitch = copy_data.dst_pitch;
	lsdma_data->u.linear_sub_window_copy_data.dst_slice_pitch = copy_data.dst_slice_pitch;
	lsdma_data->u.linear_sub_window_copy_data.rect_x = copy_data.rect_x;
	lsdma_data->u.linear_sub_window_copy_data.rect_y = copy_data.rect_y;
	lsdma_data->u.linear_sub_window_copy_data.src_cache_policy = copy_data.src_cache_policy;
	lsdma_data->u.linear_sub_window_copy_data.dst_cache_policy = copy_data.dst_cache_policy;

	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);

	if (!result)
		DC_ERROR("LSDMA Linear Sub Window Copy failed in DMUB\n");

	return result;
}
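
/*
 * Tiled-to-tiled copies take a single swizzle_mode and element_size in the
 * params and apply them to both source and destination, as the field
 * assignments below show; copies between differently swizzled surfaces are
 * therefore not expressible through this helper.
 */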
DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB"); 2250 2251 return result; 2252 } 2253 2254 bool dmub_lsdma_send_pio_copy_command( 2255 struct dc_dmub_srv *dc_dmub_srv, 2256 uint64_t src_addr, 2257 uint64_t dst_addr, 2258 uint32_t byte_count, 2259 uint32_t overlap_disable 2260 ) 2261 { 2262 struct dc_context *dc_ctx = dc_dmub_srv->ctx; 2263 union dmub_rb_cmd cmd; 2264 enum dm_dmub_wait_type wait_type; 2265 struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; 2266 bool result; 2267 2268 memset(&cmd, 0, sizeof(cmd)); 2269 2270 cmd.cmd_common.header.type = DMUB_CMD__LSDMA; 2271 cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY; 2272 wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; 2273 2274 lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count; 2275 lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable; 2276 lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF; 2277 lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF; 2278 lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF; 2279 lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; 2280 2281 result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); 2282 2283 if (!result) 2284 DC_ERROR("LSDMA PIO Copy failed in DMUB"); 2285 2286 return result; 2287 } 2288 2289 bool dmub_lsdma_send_pio_constfill_command( 2290 struct dc_dmub_srv *dc_dmub_srv, 2291 uint64_t dst_addr, 2292 uint32_t byte_count, 2293 uint32_t data 2294 ) 2295 { 2296 struct dc_context *dc_ctx = dc_dmub_srv->ctx; 2297 union dmub_rb_cmd cmd; 2298 enum dm_dmub_wait_type wait_type; 2299 struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; 2300 bool result; 2301 2302 memset(&cmd, 0, sizeof(cmd)); 2303 2304 cmd.cmd_common.header.type = DMUB_CMD__LSDMA; 2305 cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL; 2306 wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; 2307 2308 lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1; 2309 lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count; 2310 lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF; 2311 lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; 2312 lsdma_data->u.pio_constfill_data.data = data; 2313 2314 result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); 2315 2316 if (!result) 2317 DC_ERROR("LSDMA PIO Constfill failed in DMUB"); 2318 2319 return result; 2320 } 2321 2322 bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data) 2323 { 2324 struct dc_context *dc_ctx = dc_dmub_srv->ctx; 2325 union dmub_rb_cmd cmd; 2326 enum dm_dmub_wait_type wait_type; 2327 struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; 2328 bool result; 2329 2330 memset(&cmd, 0, sizeof(cmd)); 2331 2332 cmd.cmd_common.header.type = DMUB_CMD__LSDMA; 2333 cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE; 2334 wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; 2335 2336 lsdma_data->u.reg_write_data.reg_addr = reg_addr; 2337 lsdma_data->u.reg_write_data.reg_data = reg_data; 2338 2339 result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); 2340 2341 if (!result) 2342 DC_ERROR("LSDMA Poll Reg failed in DMUB"); 2343 2344 return result; 2345 } 2346 2347 bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc) 2348 { 2349 return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled; 2350 } 2351 2352 void dc_dmub_srv_release_hw(const struct dc *dc) 2353 { 2354 struct dc_dmub_srv *dc_dmub_srv = 
void dc_dmub_srv_release_hw(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
	union dmub_rb_cmd cmd = {0};

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return;

	memset(&cmd, 0, sizeof(cmd));
	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW;
	cmd.idle_opt_notify_idle.header.payload_bytes =
		sizeof(cmd.idle_opt_notify_idle) -
		sizeof(cmd.idle_opt_notify_idle.header);

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return;

	dmub = dc_dmub_srv->dmub;

	if (dmub_srv_get_preos_info(dmub)) {
		DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__);
		DC_LOG_DEBUG("fw_version              : 0x%08x", dmub->preos_info.fw_version);
		DC_LOG_DEBUG("boot_options            : 0x%08x", dmub->preos_info.boot_options);
		DC_LOG_DEBUG("boot_status             : 0x%08x", dmub->preos_info.boot_status);
		DC_LOG_DEBUG("trace_buffer_phy_addr   : 0x%016llx", dmub->preos_info.trace_buffer_phy_addr);
		DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size);
		DC_LOG_DEBUG("fb_base                 : 0x%016llx", dmub->preos_info.fb_base);
		DC_LOG_DEBUG("fb_offset               : 0x%016llx", dmub->preos_info.fb_offset);
	}
}