1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 #include "dm_services.h" 27 #include "dc.h" 28 #include "dc_dmub_srv.h" 29 #include "../dmub/dmub_srv.h" 30 #include "dm_helpers.h" 31 #include "dc_hw_types.h" 32 #include "core_types.h" 33 #include "../basics/conversion.h" 34 #include "cursor_reg_cache.h" 35 #include "resource.h" 36 #include "clk_mgr.h" 37 #include "dc_state_priv.h" 38 #include "dc_plane_priv.h" 39 40 #define CTX dc_dmub_srv->ctx 41 #define DC_LOGGER CTX->logger 42 43 static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc, 44 struct dmub_srv *dmub) 45 { 46 dc_srv->dmub = dmub; 47 dc_srv->ctx = dc->ctx; 48 } 49 50 struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub) 51 { 52 struct dc_dmub_srv *dc_srv = 53 kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL); 54 55 if (dc_srv == NULL) { 56 BREAK_TO_DEBUGGER(); 57 return NULL; 58 } 59 60 dc_dmub_srv_construct(dc_srv, dc, dmub); 61 62 return dc_srv; 63 } 64 65 void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv) 66 { 67 if (*dmub_srv) { 68 kfree(*dmub_srv); 69 *dmub_srv = NULL; 70 } 71 } 72 73 void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) 74 { 75 struct dmub_srv *dmub = dc_dmub_srv->dmub; 76 struct dc_context *dc_ctx = dc_dmub_srv->ctx; 77 enum dmub_status status; 78 79 do { 80 status = dmub_srv_wait_for_idle(dmub, 100000); 81 } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); 82 83 if (status != DMUB_STATUS_OK) { 84 DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); 85 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 86 } 87 } 88 89 void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv) 90 { 91 struct dmub_srv *dmub = dc_dmub_srv->dmub; 92 struct dc_context *dc_ctx = dc_dmub_srv->ctx; 93 enum dmub_status status = DMUB_STATUS_OK; 94 95 status = dmub_srv_clear_inbox0_ack(dmub); 96 if (status != DMUB_STATUS_OK) { 97 DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status); 98 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 99 } 100 } 101 102 void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv) 103 { 104 struct dmub_srv *dmub = dc_dmub_srv->dmub; 105 struct dc_context *dc_ctx = dc_dmub_srv->ctx; 106 enum dmub_status status = DMUB_STATUS_OK; 107 108 status = dmub_srv_wait_for_inbox0_ack(dmub, 100000); 109 if (status != DMUB_STATUS_OK) { 110 DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n"); 111 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 112 } 113 } 114 115 void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv, 116 union dmub_inbox0_data_register data) 117 { 118 struct dmub_srv *dmub = dc_dmub_srv->dmub; 119 struct dc_context *dc_ctx = dc_dmub_srv->ctx; 120 enum dmub_status status = DMUB_STATUS_OK; 121 122 status = dmub_srv_send_inbox0_cmd(dmub, data); 123 if (status != DMUB_STATUS_OK) { 124 DC_ERROR("Error sending INBOX0 cmd\n"); 125 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 126 } 127 } 128 129 bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, 130 unsigned int count, 131 union dmub_rb_cmd *cmd_list) 132 { 133 struct dc_context *dc_ctx; 134 struct dmub_srv *dmub; 135 enum dmub_status status; 136 int i; 137 138 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 139 return false; 140 141 dc_ctx = dc_dmub_srv->ctx; 142 dmub = dc_dmub_srv->dmub; 143 144 for (i = 0 ; i < count; i++) { 145 // Queue command 146 status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); 147 148 if (status == DMUB_STATUS_QUEUE_FULL) { 149 /* Execute and wait for queue to become empty again. */ 150 status = dmub_srv_cmd_execute(dmub); 151 if (status == DMUB_STATUS_POWER_STATE_D3) 152 return false; 153 154 do { 155 status = dmub_srv_wait_for_idle(dmub, 100000); 156 } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); 157 158 /* Requeue the command. */ 159 status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); 160 } 161 162 if (status != DMUB_STATUS_OK) { 163 if (status != DMUB_STATUS_POWER_STATE_D3) { 164 DC_ERROR("Error queueing DMUB command: status=%d\n", status); 165 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 166 } 167 return false; 168 } 169 } 170 171 status = dmub_srv_cmd_execute(dmub); 172 if (status != DMUB_STATUS_OK) { 173 if (status != DMUB_STATUS_POWER_STATE_D3) { 174 DC_ERROR("Error starting DMUB execution: status=%d\n", status); 175 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 176 } 177 return false; 178 } 179 180 return true; 181 } 182 183 bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, 184 enum dm_dmub_wait_type wait_type, 185 union dmub_rb_cmd *cmd_list) 186 { 187 struct dmub_srv *dmub; 188 enum dmub_status status; 189 190 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 191 return false; 192 193 dmub = dc_dmub_srv->dmub; 194 195 // Wait for DMUB to process command 196 if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { 197 do { 198 status = dmub_srv_wait_for_idle(dmub, 100000); 199 } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK); 200 201 if (status != DMUB_STATUS_OK) { 202 DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); 203 if (!dmub->debug.timeout_occured) { 204 dmub->debug.timeout_occured = true; 205 dmub->debug.timeout_cmd = *cmd_list; 206 dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx); 207 } 208 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 209 return false; 210 } 211 212 // Copy data back from ring buffer into command 213 if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) 214 dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); 215 } 216 217 return true; 218 } 219 220 bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 221 { 222 return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type); 223 } 224 225 bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type) 226 { 227 struct dc_context *dc_ctx; 228 struct dmub_srv *dmub; 229 enum dmub_status status; 230 int i; 231 232 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 233 return false; 234 235 dc_ctx = dc_dmub_srv->ctx; 236 dmub = dc_dmub_srv->dmub; 237 238 for (i = 0 ; i < count; i++) { 239 // Queue command 240 status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); 241 242 if (status == DMUB_STATUS_QUEUE_FULL) { 243 /* Execute and wait for queue to become empty again. */ 244 status = dmub_srv_cmd_execute(dmub); 245 if (status == DMUB_STATUS_POWER_STATE_D3) 246 return false; 247 248 status = dmub_srv_wait_for_idle(dmub, 100000); 249 if (status != DMUB_STATUS_OK) 250 return false; 251 252 /* Requeue the command. */ 253 status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); 254 } 255 256 if (status != DMUB_STATUS_OK) { 257 if (status != DMUB_STATUS_POWER_STATE_D3) { 258 DC_ERROR("Error queueing DMUB command: status=%d\n", status); 259 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 260 } 261 return false; 262 } 263 } 264 265 status = dmub_srv_cmd_execute(dmub); 266 if (status != DMUB_STATUS_OK) { 267 if (status != DMUB_STATUS_POWER_STATE_D3) { 268 DC_ERROR("Error starting DMUB execution: status=%d\n", status); 269 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 270 } 271 return false; 272 } 273 274 // Wait for DMUB to process command 275 if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { 276 if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { 277 do { 278 status = dmub_srv_wait_for_idle(dmub, 100000); 279 } while (status != DMUB_STATUS_OK); 280 } else 281 status = dmub_srv_wait_for_idle(dmub, 100000); 282 283 if (status != DMUB_STATUS_OK) { 284 DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); 285 dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 286 return false; 287 } 288 289 // Copy data back from ring buffer into command 290 if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) 291 dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); 292 } 293 294 return true; 295 } 296 297 bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv) 298 { 299 struct dmub_srv *dmub; 300 struct dc_context *dc_ctx; 301 union dmub_fw_boot_status boot_status; 302 enum dmub_status status; 303 304 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 305 return false; 306 307 dmub = dc_dmub_srv->dmub; 308 dc_ctx = dc_dmub_srv->ctx; 309 310 status = dmub_srv_get_fw_boot_status(dmub, &boot_status); 311 if (status != DMUB_STATUS_OK) { 312 DC_ERROR("Error querying DMUB boot status: error=%d\n", status); 313 return false; 314 } 315 316 return boot_status.bits.optimized_init_done; 317 } 318 319 bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, 320 unsigned int stream_mask) 321 { 322 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 323 return false; 324 325 return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK, 326 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT); 327 } 328 329 bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv) 330 { 331 struct dmub_srv *dmub; 332 struct dc_context *dc_ctx; 333 union dmub_fw_boot_status boot_status; 334 enum dmub_status status; 335 336 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 337 return false; 338 339 dmub = dc_dmub_srv->dmub; 340 dc_ctx = dc_dmub_srv->ctx; 341 342 status = dmub_srv_get_fw_boot_status(dmub, &boot_status); 343 if (status != DMUB_STATUS_OK) { 344 DC_ERROR("Error querying DMUB boot status: error=%d\n", status); 345 return false; 346 } 347 348 return boot_status.bits.restore_required; 349 } 350 351 bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry) 352 { 353 struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub; 354 return dmub_srv_get_outbox0_msg(dmub, entry); 355 } 356 357 void dc_dmub_trace_event_control(struct dc *dc, bool enable) 358 { 359 dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable); 360 } 361 362 void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max) 363 { 364 union dmub_rb_cmd cmd = { 0 }; 365 366 cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 367 cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE; 368 cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max; 369 cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min; 370 cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst; 371 372 cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); 373 374 // Send the command to the DMCUB. 375 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 376 } 377 378 void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) 379 { 380 union dmub_rb_cmd cmd = { 0 }; 381 382 cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 383 cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER; 384 cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst; 385 386 cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); 387 388 // Send the command to the DMCUB. 389 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 390 } 391 392 static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream) 393 { 394 uint8_t pipes = 0; 395 int i = 0; 396 397 for (i = 0; i < MAX_PIPES; i++) { 398 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 399 400 if (pipe->stream == stream && pipe->stream_res.tg) 401 pipes = i; 402 } 403 return pipes; 404 } 405 406 static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context, 407 struct pipe_ctx *head_pipe, 408 struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data) 409 { 410 int j; 411 int pipe_idx = 0; 412 413 fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst; 414 for (j = 0; j < dc->res_pool->pipe_count; j++) { 415 struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j]; 416 417 if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) { 418 fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst; 419 } 420 } 421 fams_pipe_data->pipe_count = pipe_idx; 422 } 423 424 bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context) 425 { 426 union dmub_rb_cmd cmd = { 0 }; 427 struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data; 428 int i = 0, k = 0; 429 int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it. 430 uint8_t visual_confirm_enabled; 431 int pipe_idx = 0; 432 struct dc_stream_status *stream_status = NULL; 433 434 if (dc == NULL) 435 return false; 436 437 visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS; 438 439 // Format command. 440 cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 441 cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL; 442 cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate; 443 cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled; 444 445 if (should_manage_pstate) { 446 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { 447 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 448 449 if (!pipe->stream) 450 continue; 451 452 /* If FAMS is being used to support P-State and there is a stream 453 * that does not use FAMS, we are in an FPO + VActive scenario. 454 * Assign vactive stretch margin in this case. 455 */ 456 stream_status = dc_state_get_stream_status(context, pipe->stream); 457 if (stream_status && !stream_status->fpo_in_use) { 458 cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us; 459 break; 460 } 461 pipe_idx++; 462 } 463 } 464 465 for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) { 466 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 467 468 if (!resource_is_pipe_type(pipe, OTG_MASTER)) 469 continue; 470 471 stream_status = dc_state_get_stream_status(context, pipe->stream); 472 if (stream_status && stream_status->fpo_in_use) { 473 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 474 uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000; 475 476 config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz; 477 config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz; 478 config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps; 479 config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream); 480 dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]); 481 k++; 482 } 483 } 484 cmd.fw_assisted_mclk_switch.header.payload_bytes = 485 sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header); 486 487 // Send the command to the DMCUB. 488 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 489 490 return true; 491 } 492 493 void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) 494 { 495 union dmub_rb_cmd cmd = { 0 }; 496 497 if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) 498 return; 499 500 memset(&cmd, 0, sizeof(cmd)); 501 502 /* Prepare fw command */ 503 cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS; 504 cmd.query_feature_caps.header.sub_type = 0; 505 cmd.query_feature_caps.header.ret_status = 1; 506 cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data); 507 508 /* If command was processed, copy feature caps to dmub srv */ 509 if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && 510 cmd.query_feature_caps.header.ret_status == 0) { 511 memcpy(&dc_dmub_srv->dmub->feature_caps, 512 &cmd.query_feature_caps.query_feature_caps_data, 513 sizeof(struct dmub_feature_caps)); 514 } 515 } 516 517 void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx) 518 { 519 union dmub_rb_cmd cmd = { 0 }; 520 unsigned int panel_inst = 0; 521 522 if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst)) 523 return; 524 525 memset(&cmd, 0, sizeof(cmd)); 526 527 // Prepare fw command 528 cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR; 529 cmd.visual_confirm_color.header.sub_type = 0; 530 cmd.visual_confirm_color.header.ret_status = 1; 531 cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data); 532 cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst; 533 534 // If command was processed, copy feature caps to dmub srv 535 if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && 536 cmd.visual_confirm_color.header.ret_status == 0) { 537 memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color, 538 &cmd.visual_confirm_color.visual_confirm_color_data, 539 sizeof(struct dmub_visual_confirm_color)); 540 } 541 } 542 543 /** 544 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command 545 * 546 * @dc: [in] pointer to dc object 547 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe 548 * @vblank_pipe: [in] pipe_ctx for the DRR pipe 549 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info 550 * @context: [in] DC state for access to phantom stream 551 * 552 * Populate the DMCUB SubVP command with DRR pipe info. All the information 553 * required for calculating the SubVP + DRR microschedule is populated here. 554 * 555 * High level algorithm: 556 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe 557 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule 558 * 3. Populate the drr_info with the min and max supported vtotal values 559 */ 560 static void populate_subvp_cmd_drr_info(struct dc *dc, 561 struct dc_state *context, 562 struct pipe_ctx *subvp_pipe, 563 struct pipe_ctx *vblank_pipe, 564 struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data) 565 { 566 struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); 567 struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; 568 struct dc_crtc_timing *phantom_timing; 569 struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing; 570 uint16_t drr_frame_us = 0; 571 uint16_t min_drr_supported_us = 0; 572 uint16_t max_drr_supported_us = 0; 573 uint16_t max_drr_vblank_us = 0; 574 uint16_t max_drr_mallregion_us = 0; 575 uint16_t mall_region_us = 0; 576 uint16_t prefetch_us = 0; 577 uint16_t subvp_active_us = 0; 578 uint16_t drr_active_us = 0; 579 uint16_t min_vtotal_supported = 0; 580 uint16_t max_vtotal_supported = 0; 581 582 if (!phantom_stream) 583 return; 584 585 phantom_timing = &phantom_stream->timing; 586 587 pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true; 588 pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping 589 pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now 590 591 drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000), 592 (((uint64_t)drr_timing->pix_clk_100hz * 100))); 593 // P-State allow width and FW delays already included phantom_timing->v_addressable 594 mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000), 595 (((uint64_t)phantom_timing->pix_clk_100hz * 100))); 596 min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; 597 min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us), 598 (((uint64_t)drr_timing->h_total * 1000000))); 599 600 prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000), 601 (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); 602 subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000), 603 (((uint64_t)main_timing->pix_clk_100hz * 100))); 604 drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000), 605 (((uint64_t)drr_timing->pix_clk_100hz * 100))); 606 max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - 607 dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us; 608 max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us; 609 max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us; 610 max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us), 611 (((uint64_t)drr_timing->h_total * 1000000))); 612 613 /* When calculating the max vtotal supported for SubVP + DRR cases, add 614 * margin due to possible rounding errors (being off by 1 line in the 615 * FW calculation can incorrectly push the P-State switch to wait 1 frame 616 * longer). 617 */ 618 max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us; 619 620 pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported; 621 pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported; 622 pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us; 623 } 624 625 /** 626 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command 627 * 628 * @dc: [in] current dc state 629 * @context: [in] new dc state 630 * @cmd: [in] DMUB cmd to be populated with SubVP info 631 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe 632 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd 633 * 634 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information 635 * required to calculate the microschedule for SubVP + VBLANK case is stored in 636 * the pipe_data (subvp_data and vblank_data). Also check if the VBLANK pipe 637 * is a DRR display -- if it is make a call to populate drr_info. 638 */ 639 static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, 640 struct dc_state *context, 641 union dmub_rb_cmd *cmd, 642 struct pipe_ctx *vblank_pipe, 643 uint8_t cmd_pipe_index) 644 { 645 uint32_t i; 646 struct pipe_ctx *pipe = NULL; 647 struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = 648 &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index]; 649 650 // Find the SubVP pipe 651 for (i = 0; i < dc->res_pool->pipe_count; i++) { 652 pipe = &context->res_ctx.pipe_ctx[i]; 653 654 // We check for master pipe, but it shouldn't matter since we only need 655 // the pipe for timing info (stream should be same for any pipe splits) 656 if (!resource_is_pipe_type(pipe, OTG_MASTER) || 657 !resource_is_pipe_type(pipe, DPP_PIPE)) 658 continue; 659 660 // Find the SubVP pipe 661 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) 662 break; 663 } 664 665 pipe_data->mode = VBLANK; 666 pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz; 667 pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total - 668 vblank_pipe->stream->timing.v_front_porch; 669 pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total; 670 pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total; 671 pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx; 672 pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start; 673 pipe_data->pipe_config.vblank_data.vblank_end = 674 vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable; 675 676 if (vblank_pipe->stream->ignore_msa_timing_param && 677 (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed)) 678 populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data); 679 } 680 681 /** 682 * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case 683 * 684 * @dc: [in] current dc state 685 * @context: [in] new dc state 686 * @cmd: [in] DMUB cmd to be populated with SubVP info 687 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2) 688 * 689 * For SubVP + SubVP, we use a single vertical interrupt to start the 690 * microschedule for both SubVP pipes. In order for this to work correctly, the 691 * MALL REGION of both SubVP pipes must start at the same time. This function 692 * lengthens the prefetch end to mall start delay of the SubVP pipe that has 693 * the shorter prefetch so that both MALL REGION's will start at the same time. 694 */ 695 static void update_subvp_prefetch_end_to_mall_start(struct dc *dc, 696 struct dc_state *context, 697 union dmub_rb_cmd *cmd, 698 struct pipe_ctx *subvp_pipes[]) 699 { 700 uint32_t subvp0_prefetch_us = 0; 701 uint32_t subvp1_prefetch_us = 0; 702 uint32_t prefetch_delta_us = 0; 703 struct dc_stream_state *phantom_stream0 = NULL; 704 struct dc_stream_state *phantom_stream1 = NULL; 705 struct dc_crtc_timing *phantom_timing0 = NULL; 706 struct dc_crtc_timing *phantom_timing1 = NULL; 707 struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL; 708 709 phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream); 710 if (!phantom_stream0) 711 return; 712 713 phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream); 714 if (!phantom_stream1) 715 return; 716 717 phantom_timing0 = &phantom_stream0->timing; 718 phantom_timing1 = &phantom_stream1->timing; 719 720 subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) * 721 (uint64_t)phantom_timing0->h_total * 1000000), 722 (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); 723 subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) * 724 (uint64_t)phantom_timing1->h_total * 1000000), 725 (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); 726 727 // Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time) 728 // should increase it's prefetch time to match the other 729 if (subvp0_prefetch_us > subvp1_prefetch_us) { 730 pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1]; 731 prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us; 732 pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines = 733 div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) * 734 ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)), 735 ((uint64_t)phantom_timing1->h_total * 1000000)); 736 737 } else if (subvp1_prefetch_us > subvp0_prefetch_us) { 738 pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0]; 739 prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us; 740 pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines = 741 div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) * 742 ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)), 743 ((uint64_t)phantom_timing0->h_total * 1000000)); 744 } 745 } 746 747 /** 748 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command 749 * 750 * @dc: [in] current dc state 751 * @context: [in] new dc state 752 * @cmd: [in] DMUB cmd to be populated with SubVP info 753 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe 754 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd 755 * 756 * Populate the DMCUB SubVP command with SubVP pipe info. All the information 757 * required to calculate the microschedule for the SubVP pipe is stored in the 758 * pipe_data of the DMCUB SubVP command. 759 */ 760 static void populate_subvp_cmd_pipe_info(struct dc *dc, 761 struct dc_state *context, 762 union dmub_rb_cmd *cmd, 763 struct pipe_ctx *subvp_pipe, 764 uint8_t cmd_pipe_index) 765 { 766 uint32_t j; 767 struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = 768 &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index]; 769 struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); 770 struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; 771 struct dc_crtc_timing *phantom_timing; 772 uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den; 773 774 if (!phantom_stream) 775 return; 776 777 phantom_timing = &phantom_stream->timing; 778 779 pipe_data->mode = SUBVP; 780 pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz; 781 pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total; 782 pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total; 783 pipe_data->pipe_config.subvp_data.main_vblank_start = 784 main_timing->v_total - main_timing->v_front_porch; 785 pipe_data->pipe_config.subvp_data.main_vblank_end = 786 main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable; 787 pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable; 788 pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst; 789 pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param && 790 (subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed); 791 792 /* Calculate the scaling factor from the src and dst height. 793 * e.g. If 3840x2160 being downscaled to 1920x1080, the scaling factor is 1/2. 794 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor" 795 * 796 * Make sure to combine stream and plane scaling together. 797 */ 798 reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height, 799 &out_num_stream, &out_den_stream); 800 reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height, 801 &out_num_plane, &out_den_plane); 802 reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den); 803 pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num; 804 pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den; 805 806 // Prefetch lines is equal to VACTIVE + BP + VSYNC 807 pipe_data->pipe_config.subvp_data.prefetch_lines = 808 phantom_timing->v_total - phantom_timing->v_front_porch; 809 810 // Round up 811 pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines = 812 div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) + 813 ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000)); 814 pipe_data->pipe_config.subvp_data.processing_delay_lines = 815 div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) + 816 ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000)); 817 818 if (subvp_pipe->bottom_pipe) { 819 pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx; 820 } else if (subvp_pipe->next_odm_pipe) { 821 pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx; 822 } else { 823 pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF; 824 } 825 826 // Find phantom pipe index based on phantom stream 827 for (j = 0; j < dc->res_pool->pipe_count; j++) { 828 struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j]; 829 830 if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) && 831 phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) { 832 pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst; 833 if (phantom_pipe->bottom_pipe) { 834 pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst; 835 } else if (phantom_pipe->next_odm_pipe) { 836 pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst; 837 } else { 838 pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF; 839 } 840 break; 841 } 842 } 843 } 844 845 /** 846 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command 847 * 848 * @dc: [in] current dc state 849 * @context: [in] new dc state 850 * @enable: [in] if true enables the pipes population 851 * 852 * This function loops through each pipe and populates the DMUB SubVP CMD info 853 * based on the pipe (e.g. SubVP, VBLANK). 854 */ 855 void dc_dmub_setup_subvp_dmub_command(struct dc *dc, 856 struct dc_state *context, 857 bool enable) 858 { 859 uint8_t cmd_pipe_index = 0; 860 uint32_t i, pipe_idx; 861 uint8_t subvp_count = 0; 862 union dmub_rb_cmd cmd; 863 struct pipe_ctx *subvp_pipes[2]; 864 uint32_t wm_val_refclk = 0; 865 enum mall_stream_type pipe_mall_type; 866 867 memset(&cmd, 0, sizeof(cmd)); 868 // FW command for SUBVP 869 cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 870 cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD; 871 cmd.fw_assisted_mclk_switch_v2.header.payload_bytes = 872 sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header); 873 874 for (i = 0; i < dc->res_pool->pipe_count; i++) { 875 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 876 877 /* For SubVP pipe count, only count the top most (ODM / MPC) pipe 878 */ 879 if (resource_is_pipe_type(pipe, OTG_MASTER) && 880 resource_is_pipe_type(pipe, DPP_PIPE) && 881 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) 882 subvp_pipes[subvp_count++] = pipe; 883 } 884 885 if (enable) { 886 // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd 887 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { 888 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 889 pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); 890 891 if (!pipe->stream) 892 continue; 893 894 /* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe. 895 * Any ODM or MPC splits being used in SubVP will be handled internally in 896 * populate_subvp_cmd_pipe_info 897 */ 898 if (resource_is_pipe_type(pipe, OTG_MASTER) && 899 resource_is_pipe_type(pipe, DPP_PIPE) && 900 pipe_mall_type == SUBVP_MAIN) { 901 populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); 902 } else if (resource_is_pipe_type(pipe, OTG_MASTER) && 903 resource_is_pipe_type(pipe, DPP_PIPE) && 904 pipe_mall_type == SUBVP_NONE) { 905 // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where 906 // we run through DML without calculating "natural" P-state support 907 populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); 908 909 } 910 pipe_idx++; 911 } 912 if (subvp_count == 2) { 913 update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes); 914 } 915 cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us; 916 cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us; 917 918 // Store the original watermark value for this SubVP config so we can lower it when the 919 // MCLK switch starts 920 wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns * 921 (dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000; 922 923 cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF; 924 } 925 926 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 927 } 928 929 bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) 930 { 931 if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data) 932 return false; 933 return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data); 934 } 935 936 void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) 937 { 938 struct dmub_diagnostic_data diag_data = {0}; 939 uint32_t i; 940 941 if (!dc_dmub_srv || !dc_dmub_srv->dmub) { 942 DC_LOG_ERROR("%s: invalid parameters.", __func__); 943 return; 944 } 945 946 DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__); 947 948 if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) { 949 DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__); 950 return; 951 } 952 953 DC_LOG_DEBUG("DMCUB STATE:"); 954 DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version); 955 DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]); 956 DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]); 957 DC_LOG_DEBUG(" scratch [2] : %08x", diag_data.scratch[2]); 958 DC_LOG_DEBUG(" scratch [3] : %08x", diag_data.scratch[3]); 959 DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]); 960 DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]); 961 DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]); 962 DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]); 963 DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]); 964 DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]); 965 DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]); 966 DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]); 967 DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]); 968 DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]); 969 DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]); 970 DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]); 971 for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++) 972 DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]); 973 DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr); 974 DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr); 975 DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr); 976 DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr); 977 DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr); 978 DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size); 979 DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr); 980 DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr); 981 DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size); 982 DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr); 983 DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr); 984 DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size); 985 DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled); 986 DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset); 987 DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset); 988 DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en); 989 DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled); 990 DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled); 991 } 992 993 static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) 994 { 995 struct pipe_ctx *test_pipe, *split_pipe; 996 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data; 997 struct rect r1 = scl_data->recout, r2, r2_half; 998 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b; 999 int cur_layer = pipe_ctx->plane_state->layer_index; 1000 1001 /** 1002 * Disable the cursor if there's another pipe above this with a 1003 * plane that contains this pipe's viewport to prevent double cursor 1004 * and incorrect scaling artifacts. 1005 */ 1006 for (test_pipe = pipe_ctx->top_pipe; test_pipe; 1007 test_pipe = test_pipe->top_pipe) { 1008 // Skip invisible layer and pipe-split plane on same layer 1009 if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer) 1010 continue; 1011 1012 r2 = test_pipe->plane_res.scl_data.recout; 1013 r2_r = r2.x + r2.width; 1014 r2_b = r2.y + r2.height; 1015 split_pipe = test_pipe; 1016 1017 /** 1018 * There is another half plane on same layer because of 1019 * pipe-split, merge together per same height. 1020 */ 1021 for (split_pipe = pipe_ctx->top_pipe; split_pipe; 1022 split_pipe = split_pipe->top_pipe) 1023 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) { 1024 r2_half = split_pipe->plane_res.scl_data.recout; 1025 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x; 1026 r2.width = r2.width + r2_half.width; 1027 r2_r = r2.x + r2.width; 1028 break; 1029 } 1030 1031 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b) 1032 return true; 1033 } 1034 1035 return false; 1036 } 1037 1038 static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx) 1039 { 1040 if (pipe_ctx->plane_state != NULL) { 1041 if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) 1042 return false; 1043 1044 if (dc_can_pipe_disable_cursor(pipe_ctx)) 1045 return false; 1046 } 1047 1048 if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || 1049 pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) && 1050 pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1) 1051 return true; 1052 1053 if (pipe_ctx->stream->link->replay_settings.config.replay_supported) 1054 return true; 1055 1056 return false; 1057 } 1058 1059 static void dc_build_cursor_update_payload0( 1060 struct pipe_ctx *pipe_ctx, uint8_t p_idx, 1061 struct dmub_cmd_update_cursor_payload0 *payload) 1062 { 1063 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1064 unsigned int panel_inst = 0; 1065 1066 if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, 1067 pipe_ctx->stream->link, &panel_inst)) 1068 return; 1069 1070 /* Payload: Cursor Rect is built from position & attribute 1071 * x & y are obtained from postion 1072 */ 1073 payload->cursor_rect.x = hubp->cur_rect.x; 1074 payload->cursor_rect.y = hubp->cur_rect.y; 1075 /* w & h are obtained from attribute */ 1076 payload->cursor_rect.width = hubp->cur_rect.w; 1077 payload->cursor_rect.height = hubp->cur_rect.h; 1078 1079 payload->enable = hubp->pos.cur_ctl.bits.cur_enable; 1080 payload->pipe_idx = p_idx; 1081 payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 1082 payload->panel_inst = panel_inst; 1083 } 1084 1085 static void dc_build_cursor_position_update_payload0( 1086 struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx, 1087 const struct hubp *hubp, const struct dpp *dpp) 1088 { 1089 /* Hubp */ 1090 pl->position_cfg.pHubp.cur_ctl.raw = hubp->pos.cur_ctl.raw; 1091 pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw; 1092 pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw; 1093 pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw; 1094 1095 /* dpp */ 1096 pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw; 1097 pl->position_cfg.pipe_idx = p_idx; 1098 } 1099 1100 static void dc_build_cursor_attribute_update_payload1( 1101 struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx, 1102 const struct hubp *hubp, const struct dpp *dpp) 1103 { 1104 /* Hubp */ 1105 pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH; 1106 pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR; 1107 pl_A->aHubp.cur_ctl.raw = hubp->att.cur_ctl.raw; 1108 pl_A->aHubp.size.raw = hubp->att.size.raw; 1109 pl_A->aHubp.settings.raw = hubp->att.settings.raw; 1110 1111 /* dpp */ 1112 pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw; 1113 } 1114 1115 /** 1116 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command 1117 * 1118 * @pCtx: [in] pipe context 1119 * @pipe_idx: [in] pipe index 1120 * 1121 * This function would store the cursor related information and pass it into 1122 * dmub 1123 */ 1124 void dc_send_update_cursor_info_to_dmu( 1125 struct pipe_ctx *pCtx, uint8_t pipe_idx) 1126 { 1127 union dmub_rb_cmd cmd[2]; 1128 union dmub_cmd_update_cursor_info_data *update_cursor_info_0 = 1129 &cmd[0].update_cursor_info.update_cursor_info_data; 1130 1131 memset(cmd, 0, sizeof(cmd)); 1132 1133 if (!dc_dmub_should_update_cursor_data(pCtx)) 1134 return; 1135 /* 1136 * Since we use multi_cmd_pending for dmub command, the 2nd command is 1137 * only assigned to store cursor attributes info. 1138 * 1st command can view as 2 parts, 1st is for PSR/Replay data, the other 1139 * is to store cursor position info. 1140 * 1141 * Command heaer type must be the same type if using multi_cmd_pending. 1142 * Besides, while process 2nd command in DMU, the sub type is useless. 1143 * So it's meanless to pass the sub type header with different type. 1144 */ 1145 1146 { 1147 /* Build Payload#0 Header */ 1148 cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; 1149 cmd[0].update_cursor_info.header.payload_bytes = 1150 sizeof(cmd[0].update_cursor_info.update_cursor_info_data); 1151 cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd 1152 1153 /* Prepare Payload */ 1154 dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0); 1155 1156 dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx, 1157 pCtx->plane_res.hubp, pCtx->plane_res.dpp); 1158 } 1159 { 1160 /* Build Payload#1 Header */ 1161 cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; 1162 cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg); 1163 cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command. 1164 1165 dc_build_cursor_attribute_update_payload1( 1166 &cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg, 1167 pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp); 1168 1169 /* Combine 2nd cmds update_curosr_info to DMU */ 1170 dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT); 1171 } 1172 } 1173 1174 bool dc_dmub_check_min_version(struct dmub_srv *srv) 1175 { 1176 if (!srv->hw_funcs.is_psrsu_supported) 1177 return true; 1178 return srv->hw_funcs.is_psrsu_supported(srv); 1179 } 1180 1181 void dc_dmub_srv_enable_dpia_trace(const struct dc *dc) 1182 { 1183 struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; 1184 1185 if (!dc_dmub_srv || !dc_dmub_srv->dmub) { 1186 DC_LOG_ERROR("%s: invalid parameters.", __func__); 1187 return; 1188 } 1189 1190 if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 1191 0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) { 1192 DC_LOG_ERROR("timeout updating trace buffer mask word\n"); 1193 return; 1194 } 1195 1196 if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 1197 0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) { 1198 DC_LOG_ERROR("timeout updating trace buffer mask word\n"); 1199 return; 1200 } 1201 1202 DC_LOG_DEBUG("Enabled DPIA trace\n"); 1203 } 1204 1205 void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index) 1206 { 1207 dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index); 1208 } 1209 1210 bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) 1211 { 1212 struct dc_context *dc_ctx; 1213 enum dmub_status status; 1214 1215 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 1216 return true; 1217 1218 if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation) 1219 return true; 1220 1221 dc_ctx = dc_dmub_srv->ctx; 1222 1223 if (wait) { 1224 if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { 1225 do { 1226 status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); 1227 } while (status != DMUB_STATUS_OK); 1228 } else { 1229 status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); 1230 if (status != DMUB_STATUS_OK) { 1231 DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status); 1232 return false; 1233 } 1234 } 1235 } else 1236 return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub); 1237 1238 return true; 1239 } 1240 1241 static int count_active_streams(const struct dc *dc) 1242 { 1243 int i, count = 0; 1244 1245 for (i = 0; i < dc->current_state->stream_count; ++i) { 1246 struct dc_stream_state *stream = dc->current_state->streams[i]; 1247 1248 if (stream && !stream->dpms_off) 1249 count += 1; 1250 } 1251 1252 return count; 1253 } 1254 1255 static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) 1256 { 1257 volatile const struct dmub_shared_state_ips_fw *ips_fw; 1258 struct dc_dmub_srv *dc_dmub_srv; 1259 union dmub_rb_cmd cmd = {0}; 1260 1261 if (dc->debug.dmcub_emulation) 1262 return; 1263 1264 if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) 1265 return; 1266 1267 dc_dmub_srv = dc->ctx->dmub_srv; 1268 ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; 1269 1270 memset(&cmd, 0, sizeof(cmd)); 1271 cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; 1272 cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE; 1273 cmd.idle_opt_notify_idle.header.payload_bytes = 1274 sizeof(cmd.idle_opt_notify_idle) - 1275 sizeof(cmd.idle_opt_notify_idle.header); 1276 1277 cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle; 1278 1279 if (dc->work_arounds.skip_psr_ips_crtc_disable) 1280 cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true; 1281 1282 if (allow_idle) { 1283 volatile struct dmub_shared_state_ips_driver *ips_driver = 1284 &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; 1285 union dmub_shared_state_ips_driver_signals new_signals; 1286 1287 DC_LOG_IPS( 1288 "%s wait idle (ips1_commit=%u ips2_commit=%u)", 1289 __func__, 1290 ips_fw->signals.bits.ips1_commit, 1291 ips_fw->signals.bits.ips2_commit); 1292 1293 dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 1294 1295 memset(&new_signals, 0, sizeof(new_signals)); 1296 1297 if (dc->config.disable_ips == DMUB_IPS_ENABLE || 1298 dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { 1299 new_signals.bits.allow_pg = 1; 1300 new_signals.bits.allow_ips1 = 1; 1301 new_signals.bits.allow_ips2 = 1; 1302 new_signals.bits.allow_z10 = 1; 1303 } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { 1304 new_signals.bits.allow_ips1 = 1; 1305 } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { 1306 new_signals.bits.allow_pg = 1; 1307 new_signals.bits.allow_ips1 = 1; 1308 } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { 1309 new_signals.bits.allow_pg = 1; 1310 new_signals.bits.allow_ips1 = 1; 1311 new_signals.bits.allow_ips2 = 1; 1312 } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) { 1313 /* TODO: Move this logic out to hwseq */ 1314 if (count_active_streams(dc) == 0) { 1315 /* IPS2 - Display off */ 1316 new_signals.bits.allow_pg = 1; 1317 new_signals.bits.allow_ips1 = 1; 1318 new_signals.bits.allow_ips2 = 1; 1319 new_signals.bits.allow_z10 = 1; 1320 } else { 1321 /* RCG only */ 1322 new_signals.bits.allow_pg = 0; 1323 new_signals.bits.allow_ips1 = 1; 1324 new_signals.bits.allow_ips2 = 0; 1325 new_signals.bits.allow_z10 = 0; 1326 } 1327 } 1328 1329 ips_driver->signals = new_signals; 1330 dc_dmub_srv->driver_signals = ips_driver->signals; 1331 } 1332 1333 DC_LOG_IPS( 1334 "%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)", 1335 __func__, 1336 allow_idle, 1337 ips_fw->signals.bits.ips1_commit, 1338 ips_fw->signals.bits.ips2_commit); 1339 1340 /* NOTE: This does not use the "wake" interface since this is part of the wake path. */ 1341 /* We also do not perform a wait since DMCUB could enter idle after the notification. */ 1342 dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT); 1343 1344 /* Register access should stop at this point. */ 1345 if (allow_idle) 1346 dc_dmub_srv->needs_idle_wake = true; 1347 } 1348 1349 static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) 1350 { 1351 struct dc_dmub_srv *dc_dmub_srv; 1352 uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0; 1353 1354 if (dc->debug.dmcub_emulation) 1355 return; 1356 1357 if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) 1358 return; 1359 1360 dc_dmub_srv = dc->ctx->dmub_srv; 1361 1362 if (dc->clk_mgr->funcs->exit_low_power_state) { 1363 volatile const struct dmub_shared_state_ips_fw *ips_fw = 1364 &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; 1365 volatile struct dmub_shared_state_ips_driver *ips_driver = 1366 &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; 1367 union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals; 1368 1369 rcg_exit_count = ips_fw->rcg_exit_count; 1370 ips1_exit_count = ips_fw->ips1_exit_count; 1371 ips2_exit_count = ips_fw->ips2_exit_count; 1372 1373 ips_driver->signals.all = 0; 1374 dc_dmub_srv->driver_signals = ips_driver->signals; 1375 1376 DC_LOG_IPS( 1377 "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)", 1378 __func__, 1379 ips_driver->signals.bits.allow_ips1, 1380 ips_driver->signals.bits.allow_ips2, 1381 ips_fw->signals.bits.ips1_commit, 1382 ips_fw->signals.bits.ips2_commit, 1383 ips_fw->rcg_entry_count, 1384 ips_fw->ips1_entry_count, 1385 ips_fw->ips2_entry_count); 1386 1387 /* Note: register access has technically not resumed for DCN here, but we 1388 * need to be message PMFW through our standard register interface. 1389 */ 1390 dc_dmub_srv->needs_idle_wake = false; 1391 1392 if (prev_driver_signals.bits.allow_ips2 && 1393 (!dc->debug.optimize_ips_handshake || 1394 ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { 1395 DC_LOG_IPS( 1396 "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)", 1397 ips_fw->signals.bits.ips1_commit, 1398 ips_fw->signals.bits.ips2_commit); 1399 1400 if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit) 1401 udelay(dc->debug.ips2_eval_delay_us); 1402 1403 if (ips_fw->signals.bits.ips2_commit) { 1404 DC_LOG_IPS( 1405 "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)", 1406 ips_fw->signals.bits.ips1_commit, 1407 ips_fw->signals.bits.ips2_commit); 1408 1409 // Tell PMFW to exit low power state 1410 dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); 1411 1412 DC_LOG_IPS( 1413 "wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)", 1414 ips_fw->signals.bits.ips1_commit, 1415 ips_fw->signals.bits.ips2_commit); 1416 1417 // Wait for IPS2 entry upper bound 1418 udelay(dc->debug.ips2_entry_delay_us); 1419 1420 DC_LOG_IPS( 1421 "exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)", 1422 ips_fw->signals.bits.ips1_commit, 1423 ips_fw->signals.bits.ips2_commit); 1424 1425 dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); 1426 1427 DC_LOG_IPS( 1428 "wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)", 1429 ips_fw->signals.bits.ips1_commit, 1430 ips_fw->signals.bits.ips2_commit); 1431 1432 while (ips_fw->signals.bits.ips2_commit) 1433 udelay(1); 1434 1435 DC_LOG_IPS( 1436 "wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)", 1437 ips_fw->signals.bits.ips1_commit, 1438 ips_fw->signals.bits.ips2_commit); 1439 1440 if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) 1441 ASSERT(0); 1442 1443 DC_LOG_IPS( 1444 "resync inbox1 (ips1_commit=%u ips2_commit=%u)", 1445 ips_fw->signals.bits.ips1_commit, 1446 ips_fw->signals.bits.ips2_commit); 1447 1448 dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub); 1449 } 1450 } 1451 1452 dc_dmub_srv_notify_idle(dc, false); 1453 if (prev_driver_signals.bits.allow_ips1) { 1454 DC_LOG_IPS( 1455 "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", 1456 ips_fw->signals.bits.ips1_commit, 1457 ips_fw->signals.bits.ips2_commit); 1458 1459 while (ips_fw->signals.bits.ips1_commit) 1460 udelay(1); 1461 1462 DC_LOG_IPS( 1463 "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)", 1464 ips_fw->signals.bits.ips1_commit, 1465 ips_fw->signals.bits.ips2_commit); 1466 } 1467 } 1468 1469 if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) 1470 ASSERT(0); 1471 1472 DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)", 1473 __func__, 1474 rcg_exit_count, 1475 ips1_exit_count, 1476 ips2_exit_count); 1477 } 1478 1479 void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state) 1480 { 1481 struct dmub_srv *dmub; 1482 1483 if (!dc_dmub_srv) 1484 return; 1485 1486 dmub = dc_dmub_srv->dmub; 1487 1488 if (power_state == DC_ACPI_CM_POWER_STATE_D0) 1489 dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0); 1490 else 1491 dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3); 1492 } 1493 1494 void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv, 1495 enum dc_acpi_cm_power_state power_state) 1496 { 1497 union dmub_rb_cmd cmd; 1498 1499 if (!dc_dmub_srv) 1500 return; 1501 1502 memset(&cmd, 0, sizeof(cmd)); 1503 1504 cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT; 1505 cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE; 1506 cmd.idle_opt_set_dc_power_state.header.payload_bytes = 1507 sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header); 1508 1509 if (power_state == DC_ACPI_CM_POWER_STATE_D0) { 1510 cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0; 1511 } else if (power_state == DC_ACPI_CM_POWER_STATE_D3) { 1512 cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3; 1513 } else { 1514 cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN; 1515 } 1516 1517 dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 1518 } 1519 1520 bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv) 1521 { 1522 volatile const struct dmub_shared_state_ips_fw *ips_fw; 1523 bool reallow_idle = false, should_detect = false; 1524 1525 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 1526 return false; 1527 1528 if (dc_dmub_srv->dmub->shared_state && 1529 dc_dmub_srv->dmub->meta_info.feature_bits.bits.shared_state_link_detection) { 1530 ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; 1531 return ips_fw->signals.bits.detection_required; 1532 } 1533 1534 /* Detection may require reading scratch 0 - exit out of idle prior to the read. */ 1535 if (dc_dmub_srv->idle_allowed) { 1536 dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false); 1537 reallow_idle = true; 1538 } 1539 1540 should_detect = dmub_srv_should_detect(dc_dmub_srv->dmub); 1541 1542 /* Re-enter idle if we're not about to immediately redetect links. */ 1543 if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && 1544 !dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle) 1545 dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true); 1546 1547 return should_detect; 1548 } 1549 1550 void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle) 1551 { 1552 struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; 1553 1554 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 1555 return; 1556 1557 allow_idle &= (!dc->debug.ips_disallow_entry); 1558 1559 if (dc_dmub_srv->idle_allowed == allow_idle) 1560 return; 1561 1562 DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle); 1563 1564 /* 1565 * Entering a low power state requires a driver notification. 1566 * Powering up the hardware requires notifying PMFW and DMCUB. 1567 * Clearing the driver idle allow requires a DMCUB command. 1568 * DMCUB commands requires the DMCUB to be powered up and restored. 1569 */ 1570 1571 if (!allow_idle) { 1572 dc_dmub_srv->idle_exit_counter += 1; 1573 1574 dc_dmub_srv_exit_low_power_state(dc); 1575 /* 1576 * Idle is considered fully exited only after the sequence above 1577 * fully completes. If we have a race of two threads exiting 1578 * at the same time then it's safe to perform the sequence 1579 * twice as long as we're not re-entering. 1580 * 1581 * Infinite command submission is avoided by using the 1582 * dm_execute_dmub_cmd submission instead of the "wake" helpers. 1583 */ 1584 dc_dmub_srv->idle_allowed = false; 1585 1586 dc_dmub_srv->idle_exit_counter -= 1; 1587 if (dc_dmub_srv->idle_exit_counter < 0) { 1588 ASSERT(0); 1589 dc_dmub_srv->idle_exit_counter = 0; 1590 } 1591 } else { 1592 /* Consider idle as notified prior to the actual submission to 1593 * prevent multiple entries. */ 1594 dc_dmub_srv->idle_allowed = true; 1595 1596 dc_dmub_srv_notify_idle(dc, allow_idle); 1597 } 1598 } 1599 1600 bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, 1601 enum dm_dmub_wait_type wait_type) 1602 { 1603 return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type); 1604 } 1605 1606 bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, 1607 union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 1608 { 1609 struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; 1610 bool result = false, reallow_idle = false; 1611 1612 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 1613 return false; 1614 1615 if (count == 0) 1616 return true; 1617 1618 if (dc_dmub_srv->idle_allowed) { 1619 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false); 1620 reallow_idle = true; 1621 } 1622 1623 /* 1624 * These may have different implementations in DM, so ensure 1625 * that we guide it to the expected helper. 1626 */ 1627 if (count > 1) 1628 result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type); 1629 else 1630 result = dm_execute_dmub_cmd(ctx, cmd, wait_type); 1631 1632 if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && 1633 !ctx->dc->debug.disable_dmub_reallow_idle) 1634 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); 1635 1636 return result; 1637 } 1638 1639 static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, 1640 uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type) 1641 { 1642 struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; 1643 const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30; 1644 enum dmub_status status; 1645 1646 if (response) 1647 *response = 0; 1648 1649 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 1650 return false; 1651 1652 status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us); 1653 if (status != DMUB_STATUS_OK) { 1654 if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT) 1655 return true; 1656 1657 return false; 1658 } 1659 1660 if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) 1661 dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response); 1662 1663 return true; 1664 } 1665 1666 bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, 1667 uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type) 1668 { 1669 struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv; 1670 bool result = false, reallow_idle = false; 1671 1672 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 1673 return false; 1674 1675 if (dc_dmub_srv->idle_allowed) { 1676 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false); 1677 reallow_idle = true; 1678 } 1679 1680 result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type); 1681 1682 if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && 1683 !ctx->dc->debug.disable_dmub_reallow_idle) 1684 dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); 1685 1686 return result; 1687 } 1688 1689 void dc_dmub_srv_fams2_update_config(struct dc *dc, 1690 struct dc_state *context, 1691 bool enable) 1692 { 1693 uint8_t num_cmds = 1; 1694 uint32_t i; 1695 union dmub_rb_cmd cmd[MAX_STREAMS + 1]; 1696 struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config; 1697 1698 memset(cmd, 0, sizeof(union dmub_rb_cmd) * (MAX_STREAMS + 1)); 1699 /* fill in generic command header */ 1700 global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 1701 global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; 1702 global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); 1703 1704 if (enable) { 1705 /* send global configuration parameters */ 1706 memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config)); 1707 1708 /* copy static feature configuration overrides */ 1709 global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery; 1710 global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug; 1711 global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip; 1712 1713 /* construct per-stream configs */ 1714 for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) { 1715 struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config; 1716 1717 /* configure command header */ 1718 stream_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 1719 stream_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; 1720 stream_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); 1721 stream_cmd->header.multi_cmd_pending = 1; 1722 /* copy stream static state */ 1723 memcpy(&stream_cmd->config.stream, 1724 &context->bw_ctx.bw.dcn.fams2_stream_params[i], 1725 sizeof(struct dmub_fams2_stream_static_state)); 1726 } 1727 } 1728 1729 /* apply feature configuration based on current driver state */ 1730 global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2; 1731 global_cmd->config.global.features.bits.enable = enable; 1732 1733 if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) { 1734 /* set multi pending for global, and unset for last stream cmd */ 1735 global_cmd->header.multi_cmd_pending = 1; 1736 cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0; 1737 num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams; 1738 } 1739 1740 dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT); 1741 } 1742 1743 void dc_dmub_srv_fams2_drr_update(struct dc *dc, 1744 uint32_t tg_inst, 1745 uint32_t vtotal_min, 1746 uint32_t vtotal_max, 1747 uint32_t vtotal_mid, 1748 uint32_t vtotal_mid_frame_num, 1749 bool program_manual_trigger) 1750 { 1751 union dmub_rb_cmd cmd = { 0 }; 1752 1753 cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 1754 cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE; 1755 cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst; 1756 cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max; 1757 cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min; 1758 cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid; 1759 cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num; 1760 cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger; 1761 1762 cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header); 1763 1764 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 1765 } 1766 1767 void dc_dmub_srv_fams2_passthrough_flip( 1768 struct dc *dc, 1769 struct dc_state *state, 1770 struct dc_stream_state *stream, 1771 struct dc_surface_update *srf_updates, 1772 int surface_count) 1773 { 1774 int plane_index; 1775 union dmub_rb_cmd cmds[MAX_PLANES]; 1776 struct dc_plane_address *address; 1777 struct dc_plane_state *plane_state; 1778 int num_cmds = 0; 1779 struct dc_stream_status *stream_status = dc_stream_get_status(stream); 1780 1781 if (surface_count <= 0 || stream_status == NULL) 1782 return; 1783 1784 memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES); 1785 1786 /* build command for each surface update */ 1787 for (plane_index = 0; plane_index < surface_count; plane_index++) { 1788 plane_state = srf_updates[plane_index].surface; 1789 address = &plane_state->address; 1790 1791 /* skip if there is no address update for plane */ 1792 if (!srf_updates[plane_index].flip_addr) 1793 continue; 1794 1795 /* build command header */ 1796 cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 1797 cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP; 1798 cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip); 1799 1800 /* for chaining multiple commands, all but last command should set to 1 */ 1801 cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1; 1802 1803 /* set topology info */ 1804 cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state); 1805 if (stream_status) 1806 cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst; 1807 1808 cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate; 1809 1810 /* build address info for command */ 1811 switch (address->type) { 1812 case PLN_ADDR_TYPE_GRAPHICS: 1813 if (address->grph.addr.quad_part == 0) { 1814 BREAK_TO_DEBUGGER(); 1815 break; 1816 } 1817 1818 cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo = 1819 address->grph.meta_addr.low_part; 1820 cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi = 1821 (uint16_t)address->grph.meta_addr.high_part; 1822 cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo = 1823 address->grph.addr.low_part; 1824 cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi = 1825 (uint16_t)address->grph.addr.high_part; 1826 break; 1827 case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE: 1828 if (address->video_progressive.luma_addr.quad_part == 0 || 1829 address->video_progressive.chroma_addr.quad_part == 0) { 1830 BREAK_TO_DEBUGGER(); 1831 break; 1832 } 1833 1834 cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo = 1835 address->video_progressive.luma_meta_addr.low_part; 1836 cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi = 1837 (uint16_t)address->video_progressive.luma_meta_addr.high_part; 1838 cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo = 1839 address->video_progressive.chroma_meta_addr.low_part; 1840 cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi = 1841 (uint16_t)address->video_progressive.chroma_meta_addr.high_part; 1842 cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo = 1843 address->video_progressive.luma_addr.low_part; 1844 cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi = 1845 (uint16_t)address->video_progressive.luma_addr.high_part; 1846 cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo = 1847 address->video_progressive.chroma_addr.low_part; 1848 cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi = 1849 (uint16_t)address->video_progressive.chroma_addr.high_part; 1850 break; 1851 default: 1852 // Should never be hit 1853 BREAK_TO_DEBUGGER(); 1854 break; 1855 } 1856 1857 num_cmds++; 1858 } 1859 1860 if (num_cmds > 0) { 1861 cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0; 1862 dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT); 1863 } 1864 } 1865