/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger

static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
				  struct dmub_srv *dmub)
{
	dc_srv->dmub = dmub;
	dc_srv->ctx = dc->ctx;
}

struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
{
	struct dc_dmub_srv *dc_srv =
		kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);

	if (dc_srv == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_dmub_srv_construct(dc_srv, dc, dmub);

	return dc_srv;
}

void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
{
	if (*dmub_srv) {
		kfree(*dmub_srv);
		*dmub_srv = NULL;
	}
}

bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	do {
		status = dmub_srv_wait_for_pending(dmub, 100000);
	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}

	return status == DMUB_STATUS_OK;
}

void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
				 union dmub_inbox0_data_register data)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error sending INBOX0 cmd\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status = DMUB_STATUS_OK;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0 ; i < count; i++) {
		/* confirm no messages pending */
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		/* queue command */
		if (status == DMUB_STATUS_OK)
			status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);

		/* check for errors */
		if (status != DMUB_STATUS_OK)
			break;
	}

	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0 ; i < count; i++) {
		// Queue command
		if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
		    dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		} else {
			status = DMUB_STATUS_QUEUE_FULL;
		}

		if (status == DMUB_STATUS_QUEUE_FULL) {
			/* Execute and wait for queue to become empty again. */
			status = dmub_srv_fb_cmd_execute(dmub);
			if (status == DMUB_STATUS_POWER_STATE_D3)
				return false;

			do {
				status = dmub_srv_wait_for_idle(dmub, 100000);
			} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

			/* Requeue the command. */
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		}

		if (status != DMUB_STATUS_OK) {
			if (status != DMUB_STATUS_POWER_STATE_D3) {
				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			}
			return false;
		}
	}

	status = dmub_srv_fb_cmd_execute(dmub);
	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	bool res = false;

	if (dc_dmub_srv && dc_dmub_srv->dmub) {
		if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG)
			res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
		else
			res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
	}

	return res;
}

bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
		enum dm_dmub_wait_type wait_type,
		union dmub_rb_cmd *cmd_list)
{
	struct dmub_srv *dmub;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
			if (!dmub->debug.timeout_info.timeout_occured) {
				dmub->debug.timeout_info.timeout_occured = true;
				if (cmd_list)
					dmub->debug.timeout_info.timeout_cmd = *cmd_list;
				dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
			}
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			return false;
		}

		// Copy data back from ring buffer into command
		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list)
			dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
	}

	return true;
}

bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
}

bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
	if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
		return false;

	return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}

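/*
 * Illustrative call pattern for the submission helpers above (a sketch, not
 * a call site in this file; dc_dmub_srv_query_caps_cmd() further below is a
 * real example of the same shape):
 *
 *	union dmub_rb_cmd cmd = { 0 };
 *
 *	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
 *	cmd.query_feature_caps.header.payload_bytes =
 *		sizeof(struct dmub_cmd_query_feature_caps_data);
 *
 *	if (dc_dmub_srv_cmd_run(dc_dmub_srv, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 *		; // on success, cmd has been refilled with the firmware reply
 *
 * Callers that can race with idle power states should generally prefer the
 * dc_wake_and_execute_dmub_cmd() wrappers defined later in this file.
 */
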
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.optimized_init_done;
}

bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
				    unsigned int stream_mask)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
					 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.restore_required;
}

bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;

	return dmub_srv_get_outbox0_msg(dmub, entry);
}

void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
	dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
	cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

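/*
 * Note: the two DRR helpers above share the DMUB_CMD__FW_ASSISTED_MCLK_SWITCH
 * command type and differ only in sub_type: DMUB_CMD__FAMS_DRR_UPDATE hands
 * new V_TOTAL min/max limits for the given timing generator to the firmware,
 * while DMUB_CMD__FAMS_SET_MANUAL_TRIGGER requests the manual DRR trigger for
 * that TG. The payload_bytes computation, sizeof(payload) - sizeof(header),
 * is the sizing idiom used for fixed-layout commands throughout this file.
 */
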
static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
	uint8_t pipes = 0;
	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			pipes = i;
	}
	return pipes;
}

static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *head_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
{
	int j;
	int pipe_idx = 0;

	fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];

		if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe))
			fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
	}
	fams_pipe_data->pipe_count = pipe_idx;
}

bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
{
	union dmub_rb_cmd cmd = { 0 };
	struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
	int i = 0, k = 0;
	int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Re-enable it.
	uint8_t visual_confirm_enabled;
	int pipe_idx = 0;
	struct dc_stream_status *stream_status = NULL;

	if (dc == NULL)
		return false;

	visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;

	// Format command.
	cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
	cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
	cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;

	if (should_manage_pstate) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			if (!pipe->stream)
				continue;

			/* If FAMS is being used to support P-State and there is a stream
			 * that does not use FAMS, we are in an FPO + VActive scenario.
			 * Assign vactive stretch margin in this case.
			 */
			stream_status = dc_state_get_stream_status(context, pipe->stream);
			if (stream_status && !stream_status->fpo_in_use) {
				cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
				break;
			}
			pipe_idx++;
		}
	}

	for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!resource_is_pipe_type(pipe, OTG_MASTER))
			continue;

		stream_status = dc_state_get_stream_status(context, pipe->stream);
		if (stream_status && stream_status->fpo_in_use) {
			uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;

			config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
			config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
			config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
			config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
			dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
			k++;
		}
	}
	cmd.fw_assisted_mclk_switch.header.payload_bytes =
		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
	union dmub_rb_cmd cmd = { 0 };

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return;

	memset(&cmd, 0, sizeof(cmd));

	/* Prepare fw command */
	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
	cmd.query_feature_caps.header.sub_type = 0;
	cmd.query_feature_caps.header.ret_status = 1;
	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);

	/* If command was processed, copy feature caps to dmub srv */
	if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.query_feature_caps.header.ret_status == 0) {
		memcpy(&dc_dmub_srv->dmub->feature_caps,
		       &cmd.query_feature_caps.query_feature_caps_data,
		       sizeof(struct dmub_feature_caps));
	}
}

void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	union dmub_rb_cmd cmd = { 0 };
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) &&
	    dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
		return;

	memset(&cmd, 0, sizeof(cmd));

	// Prepare fw command
	cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
	cmd.visual_confirm_color.header.sub_type = 0;
	cmd.visual_confirm_color.header.ret_status = 1;
	cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
	cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;

	// If command was processed, copy the visual confirm color to dmub srv
	if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.visual_confirm_color.header.ret_status == 0) {
		memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
		       &cmd.visual_confirm_color.visual_confirm_color_data,
		       sizeof(struct dmub_visual_confirm_color));
	}
}

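/*
 * SubVP (sub-viewport) command population. The helpers below build the
 * DMUB_CMD__FW_ASSISTED_MCLK_SWITCH / DMUB_CMD__HANDLE_SUBVP_CMD (v2)
 * command: the driver describes the timing of each main, phantom, VBLANK
 * and DRR pipe, and the firmware uses that data to fit MCLK switches into
 * the resulting microschedule (see the kernel-doc on each helper).
 */
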
/**
 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * @dc: [in] pointer to dc object
 * @context: [in] DC state for access to phantom stream
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
 *
 * Populate the DMCUB SubVP command with DRR pipe info. All the information
 * required for calculating the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *subvp_pipe,
		struct pipe_ctx *vblank_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
	uint16_t drr_frame_us = 0;
	uint16_t min_drr_supported_us = 0;
	uint16_t max_drr_supported_us = 0;
	uint16_t max_drr_vblank_us = 0;
	uint16_t max_drr_mallregion_us = 0;
	uint16_t mall_region_us = 0;
	uint16_t prefetch_us = 0;
	uint16_t subvp_active_us = 0;
	uint16_t drr_active_us = 0;
	uint16_t min_vtotal_supported = 0;
	uint16_t max_vtotal_supported = 0;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
	pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
	pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

	drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	// P-State allow width and FW delays are already included in phantom_timing->v_addressable
	mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100)));
	min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
	min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
			(((uint64_t)main_timing->pix_clk_100hz * 100)));
	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
			dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	/* When calculating the max vtotal supported for SubVP + DRR cases, add
	 * margin due to possible rounding errors (being off by 1 line in the
	 * FW calculation can incorrectly push the P-State switch to wait 1 frame
	 * longer).
	 */
	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;

	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
}

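/*
 * Worked example for the us <-> lines conversions above (illustrative
 * numbers only, not taken from a real mode): for a 1080p60-class DRR timing
 * with h_total = 2200 and pix_clk = 148.5 MHz, one line takes
 * 2200 / 148500000 s ~= 14.8 us, so a 16667 us minimum DRR frame converts to
 * min_vtotal_supported ~= 16667 / 14.8 ~= 1125 lines. The same relation,
 * lines = us * pix_clk / (h_total * 1000000), is used for the max case.
 */
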
/**
 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 * required to calculate the microschedule for the SubVP + VBLANK case is stored
 * in the pipe_data (subvp_data and vblank_data). Also check if the VBLANK pipe
 * is a DRR display -- if it is, make a call to populate drr_info.
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *vblank_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t i;
	struct pipe_ctx *pipe = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
		&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

	// Find the SubVP pipe
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (stream should be same for any pipe splits)
		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
		    !resource_is_pipe_type(pipe, DPP_PIPE))
			continue;

		// Find the SubVP pipe
		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			break;
	}

	pipe_data->mode = VBLANK;
	pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
			vblank_pipe->stream->timing.v_front_porch;
	pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
	pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
	pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
	pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
	pipe_data->pipe_config.vblank_data.vblank_end =
			vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

	if (vblank_pipe->stream->ignore_msa_timing_param &&
	    (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}

/**
 * update_subvp_prefetch_end_to_mall_start - Helper for SubVP + SubVP case
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the
 * microschedule for both SubVP pipes. In order for this to work correctly, the
 * MALL REGION of both SubVP pipes must start at the same time. This function
 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 * the shorter prefetch so that both MALL REGIONs will start at the same time.
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipes[])
{
	uint32_t subvp0_prefetch_us = 0;
	uint32_t subvp1_prefetch_us = 0;
	uint32_t prefetch_delta_us = 0;
	struct dc_stream_state *phantom_stream0 = NULL;
	struct dc_stream_state *phantom_stream1 = NULL;
	struct dc_crtc_timing *phantom_timing0 = NULL;
	struct dc_crtc_timing *phantom_timing1 = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
	if (!phantom_stream0)
		return;

	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
	if (!phantom_stream1)
		return;

	phantom_timing0 = &phantom_stream0->timing;
	phantom_timing1 = &phantom_stream1->timing;

	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
			(uint64_t)phantom_timing0->h_total * 1000000),
			(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
			(uint64_t)phantom_timing1->h_total * 1000000),
			(((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));

	// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
	// should increase its prefetch time to match the other
	if (subvp0_prefetch_us > subvp1_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
		prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing1->h_total * 1000000));
	} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
		prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing0->h_total * 1000000));
	}
}

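/*
 * Note on the conversions above and below: prefetch_to_mall_start_lines is a
 * ceiling division, lines = ceil(us * pix_clk / (h_total * 1000000)),
 * implemented with the usual (numerator + denominator - 1) / denominator
 * idiom so that a partial line still counts as a full line of delay.
 */
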
/**
 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 * required to calculate the microschedule for the SubVP pipe is stored in the
 * pipe_data of the DMCUB SubVP command.
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t j;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
		&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->mode = SUBVP;
	pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
	pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
	pipe_data->pipe_config.subvp_data.main_vblank_start =
			main_timing->v_total - main_timing->v_front_porch;
	pipe_data->pipe_config.subvp_data.main_vblank_end =
			main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
	pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
		(subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);

	/* Calculate the scaling factor from the src and dst height.
	 * e.g. If 3840x2160 being downscaled to 1920x1080, the scaling factor is 1/2.
	 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
	 *
	 * Make sure to combine stream and plane scaling together.
	 */
	reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
			&out_num_stream, &out_den_stream);
	reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
			&out_num_plane, &out_den_plane);
	reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
	pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
	pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;

	// Prefetch lines is equal to VACTIVE + BP + VSYNC
	pipe_data->pipe_config.subvp_data.prefetch_lines =
			phantom_timing->v_total - phantom_timing->v_front_porch;

	// Round up
	pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
				((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
	pipe_data->pipe_config.subvp_data.processing_delay_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
				((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

	if (subvp_pipe->bottom_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
	} else if (subvp_pipe->next_odm_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
	} else {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
	}

	// Find phantom pipe index based on phantom stream
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
		    phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
			if (phantom_pipe->bottom_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
			} else if (phantom_pipe->next_odm_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
			} else {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
			}
			break;
		}
	}
}

/**
 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @enable: [in] if true, enable the pipe data population
 *
 * This function loops through each pipe and populates the DMUB SubVP CMD info
 * based on the pipe (e.g. SubVP, VBLANK).
 */
void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	uint8_t cmd_pipe_index = 0;
	uint32_t i, pipe_idx;
	uint8_t subvp_count = 0;
	union dmub_rb_cmd cmd;
	struct pipe_ctx *subvp_pipes[2];
	uint32_t wm_val_refclk = 0;
	enum mall_stream_type pipe_mall_type;

	memset(&cmd, 0, sizeof(cmd));
	// FW command for SUBVP
	cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
	cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
			sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* For the SubVP pipe count, only count the topmost (ODM / MPC) pipe.
		 */
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
		    resource_is_pipe_type(pipe, DPP_PIPE) &&
		    dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			subvp_pipes[subvp_count++] = pipe;
	}

	if (enable) {
		// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

			if (!pipe->stream)
				continue;

			/* When populating subvp cmd info, only pass in the topmost (ODM / MPC) pipe.
			 * Any ODM or MPC splits being used in SubVP will be handled internally in
			 * populate_subvp_cmd_pipe_info
			 */
			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
			    resource_is_pipe_type(pipe, DPP_PIPE) &&
			    pipe_mall_type == SUBVP_MAIN) {
				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
				   resource_is_pipe_type(pipe, DPP_PIPE) &&
				   pipe_mall_type == SUBVP_NONE) {
				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
				// we run through DML without calculating "natural" P-state support
				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			}
			pipe_idx++;
		}
		if (subvp_count == 2)
			update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
		cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
		cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;

		// Store the original watermark value for this SubVP config so we can lower it when the
		// MCLK switch starts
		wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
				(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;

		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

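/*
 * Note: when @enable is false above, the command is still sent, just with
 * zeroed config_data, which in effect asks the firmware to drop the previous
 * SubVP configuration.
 */
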
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub);
}

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	uint32_t i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);

	if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv)) {
		DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
		return;
	}

	DC_LOG_DEBUG("DMCUB STATE:");
	DC_LOG_DEBUG("    dmcub_version      : %08x", dc_dmub_srv->dmub->debug.dmcub_version);
	DC_LOG_DEBUG("    scratch  [0]       : %08x", dc_dmub_srv->dmub->debug.scratch[0]);
	DC_LOG_DEBUG("    scratch  [1]       : %08x", dc_dmub_srv->dmub->debug.scratch[1]);
	DC_LOG_DEBUG("    scratch  [2]       : %08x", dc_dmub_srv->dmub->debug.scratch[2]);
	DC_LOG_DEBUG("    scratch  [3]       : %08x", dc_dmub_srv->dmub->debug.scratch[3]);
	DC_LOG_DEBUG("    scratch  [4]       : %08x", dc_dmub_srv->dmub->debug.scratch[4]);
	DC_LOG_DEBUG("    scratch  [5]       : %08x", dc_dmub_srv->dmub->debug.scratch[5]);
	DC_LOG_DEBUG("    scratch  [6]       : %08x", dc_dmub_srv->dmub->debug.scratch[6]);
	DC_LOG_DEBUG("    scratch  [7]       : %08x", dc_dmub_srv->dmub->debug.scratch[7]);
	DC_LOG_DEBUG("    scratch  [8]       : %08x", dc_dmub_srv->dmub->debug.scratch[8]);
	DC_LOG_DEBUG("    scratch  [9]       : %08x", dc_dmub_srv->dmub->debug.scratch[9]);
	DC_LOG_DEBUG("    scratch [10]       : %08x", dc_dmub_srv->dmub->debug.scratch[10]);
	DC_LOG_DEBUG("    scratch [11]       : %08x", dc_dmub_srv->dmub->debug.scratch[11]);
	DC_LOG_DEBUG("    scratch [12]       : %08x", dc_dmub_srv->dmub->debug.scratch[12]);
	DC_LOG_DEBUG("    scratch [13]       : %08x", dc_dmub_srv->dmub->debug.scratch[13]);
	DC_LOG_DEBUG("    scratch [14]       : %08x", dc_dmub_srv->dmub->debug.scratch[14]);
	DC_LOG_DEBUG("    scratch [15]       : %08x", dc_dmub_srv->dmub->debug.scratch[15]);
	for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
		DC_LOG_DEBUG("    pc[%d]             : %08x", i, dc_dmub_srv->dmub->debug.pc[i]);
	DC_LOG_DEBUG("    unk_fault_addr     : %08x", dc_dmub_srv->dmub->debug.undefined_address_fault_addr);
	DC_LOG_DEBUG("    inst_fault_addr    : %08x", dc_dmub_srv->dmub->debug.inst_fetch_fault_addr);
	DC_LOG_DEBUG("    data_fault_addr    : %08x", dc_dmub_srv->dmub->debug.data_write_fault_addr);
	DC_LOG_DEBUG("    inbox1_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_rptr);
	DC_LOG_DEBUG("    inbox1_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_wptr);
	DC_LOG_DEBUG("    inbox1_size        : %08x", dc_dmub_srv->dmub->debug.inbox1_size);
	DC_LOG_DEBUG("    inbox0_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_rptr);
	DC_LOG_DEBUG("    inbox0_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_wptr);
	DC_LOG_DEBUG("    inbox0_size        : %08x", dc_dmub_srv->dmub->debug.inbox0_size);
	DC_LOG_DEBUG("    outbox1_rptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_rptr);
	DC_LOG_DEBUG("    outbox1_wptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_wptr);
	DC_LOG_DEBUG("    outbox1_size       : %08x", dc_dmub_srv->dmub->debug.outbox1_size);
	DC_LOG_DEBUG("    is_enabled         : %d", dc_dmub_srv->dmub->debug.is_dmcub_enabled);
	DC_LOG_DEBUG("    is_soft_reset      : %d", dc_dmub_srv->dmub->debug.is_dmcub_soft_reset);
	DC_LOG_DEBUG("    is_secure_reset    : %d", dc_dmub_srv->dmub->debug.is_dmcub_secure_reset);
	DC_LOG_DEBUG("    is_traceport_en    : %d", dc_dmub_srv->dmub->debug.is_traceport_en);
	DC_LOG_DEBUG("    is_cw0_en          : %d", dc_dmub_srv->dmub->debug.is_cw0_enabled);
	DC_LOG_DEBUG("    is_cw6_en          : %d", dc_dmub_srv->dmub->debug.is_cw6_enabled);
}

static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->plane_state != NULL) {
		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
		    resource_can_pipe_disable_cursor(pipe_ctx))
			return false;
	}

	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
	     pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
	    pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
		return true;

	return false;
}

static void dc_build_cursor_update_payload0(
		struct pipe_ctx *pipe_ctx, uint8_t p_idx,
		struct dmub_cmd_update_cursor_payload0 *payload)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
			pipe_ctx->stream->link, &panel_inst))
		return;

	/* Payload: Cursor Rect is built from position & attribute
	 * x & y are obtained from position
	 */
	payload->cursor_rect.x = hubp->cur_rect.x;
	payload->cursor_rect.y = hubp->cur_rect.y;
	/* w & h are obtained from attribute */
	payload->cursor_rect.width  = hubp->cur_rect.w;
	payload->cursor_rect.height = hubp->cur_rect.h;

	payload->enable      = hubp->pos.cur_ctl.bits.cur_enable;
	payload->pipe_idx    = p_idx;
	payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	payload->panel_inst  = panel_inst;
}

static void dc_build_cursor_position_update_payload0(
		struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl->position_cfg.pHubp.cur_ctl.raw  = hubp->pos.cur_ctl.raw;
	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;

	/* dpp */
	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
	pl->position_cfg.pipe_idx = p_idx;
}

static void dc_build_cursor_attribute_update_payload1(
		struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
	pl_A->aHubp.cur_ctl.raw  = hubp->att.cur_ctl.raw;
	pl_A->aHubp.size.raw     = hubp->att.size.raw;
	pl_A->aHubp.settings.raw = hubp->att.settings.raw;

	/* dpp */
	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
}

/**
 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command
 *
 * @pCtx: [in] pipe context
 * @pipe_idx: [in] pipe index
 *
 * This function stores the cursor related information and passes it into
 * dmub
 */
void dc_send_update_cursor_info_to_dmu(
		struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
	union dmub_rb_cmd cmd[2];
	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
		&cmd[0].update_cursor_info.update_cursor_info_data;

	memset(cmd, 0, sizeof(cmd));

	if (!dc_dmub_should_update_cursor_data(pCtx))
		return;
	/*
	 * Since we use multi_cmd_pending for the dmub command, the 2nd command is
	 * only assigned to store cursor attributes info.
	 * The 1st command can be viewed as 2 parts: the first is for PSR/Replay data,
	 * the other is to store cursor position info.
	 *
	 * The command header type must be the same when using multi_cmd_pending.
	 * Besides, while processing the 2nd command in DMUB, the sub type is unused,
	 * so it is meaningless to pass a sub type header with a different type.
	 */

	{
		/* Build Payload#0 Header */
		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[0].update_cursor_info.header.payload_bytes =
				sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd

		/* Prepare Payload */
		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);

		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
	}
	{
		/* Build Payload#1 Header */
		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command.

		dc_build_cursor_attribute_update_payload1(
				&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);

		/* Combine both update_cursor_info commands and send them to the DMU */
		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (!srv->hw_funcs.is_psrsu_supported)
		return true;

	return srv->hw_funcs.is_psrsu_supported(srv);
}

void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	DC_LOG_DEBUG("Enabled DPIA trace\n");
}

void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
{
	dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}

bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return true;

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return true;

	dc_ctx = dc_dmub_srv->ctx;

	if (wait) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			} while (status != DMUB_STATUS_OK);
		} else {
			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			if (status != DMUB_STATUS_OK) {
				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
				return false;
			}
		}
	} else
		return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);

	return true;
}

static int count_active_streams(const struct dc *dc)
{
	int i, count = 0;

	for (i = 0; i < dc->current_state->stream_count; ++i) {
		struct dc_stream_state *stream = dc->current_state->streams[i];

		if (stream && (!stream->dpms_off || dc->config.disable_ips_in_dpms_off))
			count += 1;
	}

	return count;
}

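/*
 * IPS (idle power states) handshake: the driver and DMCUB communicate
 * through a shared-state region. ips_driver->signals advertises which
 * states the driver currently allows (PG, IPS1, IPS2, Z10), while
 * ips_fw->signals reports what the firmware has actually committed to.
 * dc_dmub_srv_notify_idle() below publishes the driver side of that
 * contract before allowing idle.
 */
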
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
	volatile const struct dmub_shared_state_ips_fw *ips_fw;
	struct dc_dmub_srv *dc_dmub_srv;
	union dmub_rb_cmd cmd = {0};

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;
	ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;

	memset(&cmd, 0, sizeof(cmd));
	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
	cmd.idle_opt_notify_idle.header.payload_bytes =
		sizeof(cmd.idle_opt_notify_idle) -
		sizeof(cmd.idle_opt_notify_idle.header);

	cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;

	if (dc->work_arounds.skip_psr_ips_crtc_disable)
		cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true;

	if (allow_idle) {
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals new_signals;

		DC_LOG_IPS(
			"%s wait idle (ips1_commit=%u ips2_commit=%u)",
			__func__,
			ips_fw->signals.bits.ips1_commit,
			ips_fw->signals.bits.ips2_commit);

		dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);

		memset(&new_signals, 0, sizeof(new_signals));

		new_signals.bits.allow_idle = 1; /* always set */

		if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
			new_signals.bits.allow_z10 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
			/* TODO: Move this logic out to hwseq */
			if (count_active_streams(dc) == 0) {
				/* IPS2 - Display off */
				new_signals.bits.allow_pg = 1;
				new_signals.bits.allow_ips1 = 1;
				new_signals.bits.allow_ips2 = 1;
				new_signals.bits.allow_z10 = 1;
			} else {
				/* RCG only */
				new_signals.bits.allow_pg = 0;
				new_signals.bits.allow_ips1 = 1;
				new_signals.bits.allow_ips2 = 0;
				new_signals.bits.allow_z10 = 0;
			}
		}

		ips_driver->signals = new_signals;
		dc_dmub_srv->driver_signals = ips_driver->signals;
	}

	DC_LOG_IPS(
		"%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)",
		__func__,
		allow_idle,
		ips_fw->signals.bits.ips1_commit,
		ips_fw->signals.bits.ips2_commit);

	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
	/* We also do not perform a wait since DMCUB could enter idle after the notification. */
	dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);

	/* Register access should stop at this point. */
	if (allow_idle)
		dc_dmub_srv->needs_idle_wake = true;
}

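/*
 * Summary of the disable_ips policy handled above (driver allow bits;
 * allow_idle is always set):
 *
 *	DMUB_IPS_ENABLE / DISABLE_DYNAMIC:	pg + ips1 + ips2 + z10
 *	DMUB_IPS_DISABLE_IPS1:			ips1 only
 *	DMUB_IPS_DISABLE_IPS2:			pg + ips1
 *	DMUB_IPS_DISABLE_IPS2_Z10:		pg + ips1 + ips2
 *	DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF:	full set when there are no
 *						active streams, otherwise RCG only
 */
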
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv;
	uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;

	if (dc->clk_mgr->funcs->exit_low_power_state) {
		volatile const struct dmub_shared_state_ips_fw *ips_fw =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;

		rcg_exit_count = ips_fw->rcg_exit_count;
		ips1_exit_count = ips_fw->ips1_exit_count;
		ips2_exit_count = ips_fw->ips2_exit_count;

		ips_driver->signals.all = 0;
		dc_dmub_srv->driver_signals = ips_driver->signals;

		DC_LOG_IPS(
			"%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
			__func__,
			ips_driver->signals.bits.allow_ips1,
			ips_driver->signals.bits.allow_ips2,
			ips_fw->signals.bits.ips1_commit,
			ips_fw->signals.bits.ips2_commit,
			ips_fw->rcg_entry_count,
			ips_fw->ips1_entry_count,
			ips_fw->ips2_entry_count);

		/* Note: register access has technically not resumed for DCN here, but we
		 * need to message PMFW through our standard register interface.
		 */
		dc_dmub_srv->needs_idle_wake = false;

		if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
		    (!dc->debug.optimize_ips_handshake ||
		     ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
			DC_LOG_IPS(
				"wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
				ips_fw->signals.bits.ips1_commit,
				ips_fw->signals.bits.ips2_commit);

			if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
				udelay(dc->debug.ips2_eval_delay_us);

			if (ips_fw->signals.bits.ips2_commit) {
				DC_LOG_IPS(
					"exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				// Tell PMFW to exit low power state
				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);

				DC_LOG_IPS(
					"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				// Wait for IPS2 entry upper bound
				udelay(dc->debug.ips2_entry_delay_us);

				DC_LOG_IPS(
					"exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);

				DC_LOG_IPS(
					"wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				while (ips_fw->signals.bits.ips2_commit)
					udelay(1);

				DC_LOG_IPS(
					"wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
					ASSERT(0);

				DC_LOG_IPS(
					"resync inbox1 (ips1_commit=%u ips2_commit=%u)",
					ips_fw->signals.bits.ips1_commit,
					ips_fw->signals.bits.ips2_commit);

				dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
			}
		}

		dc_dmub_srv_notify_idle(dc, false);
		if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
			DC_LOG_IPS(
				"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
				ips_fw->signals.bits.ips1_commit,
				ips_fw->signals.bits.ips2_commit);

			while (ips_fw->signals.bits.ips1_commit)
				udelay(1);

			DC_LOG_IPS(
				"wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
				ips_fw->signals.bits.ips1_commit,
				ips_fw->signals.bits.ips2_commit);
		}
	}

	if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
		ASSERT(0);

	DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
		__func__,
		rcg_exit_count,
		ips1_exit_count,
		ips2_exit_count);
}

void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
{
	struct dmub_srv *dmub;

	if (!dc_dmub_srv)
		return;

	dmub = dc_dmub_srv->dmub;

	if (power_state == DC_ACPI_CM_POWER_STATE_D0)
		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
	else
		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
}

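/*
 * Note the split above/below: dc_dmub_srv_set_power_state() only updates the
 * driver-side dmub_srv bookkeeping (so that command submission can be
 * rejected with DMUB_STATUS_POWER_STATE_D3 while in D3), while
 * dc_dmub_srv_notify_fw_dc_power_state() below actually sends an IDLE_OPT
 * command so the firmware itself can react to the D-state change.
 */
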
void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
					  enum dc_acpi_cm_power_state power_state)
{
	union dmub_rb_cmd cmd;

	if (!dc_dmub_srv)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
	cmd.idle_opt_set_dc_power_state.header.payload_bytes =
		sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);

	if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
	} else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
	} else {
		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
	}

	dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
{
	volatile const struct dmub_shared_state_ips_fw *ips_fw;
	bool reallow_idle = false, should_detect = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (dc_dmub_srv->dmub->shared_state &&
	    dc_dmub_srv->dmub->meta_info.feature_bits.bits.shared_state_link_detection) {
		ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
		return ips_fw->signals.bits.detection_required;
	}

	/* Detection may require reading scratch 0 - exit out of idle prior to the read. */
	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false);
		reallow_idle = true;
	}

	should_detect = dmub_srv_should_detect(dc_dmub_srv->dmub);

	/* Re-enter idle if we're not about to immediately redetect links. */
	if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
	    !dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true);

	return should_detect;
}
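/*
 * Illustrative usage (hypothetical caller, not part of this file): a link
 * detection path could poll the helper above before doing a full
 * redetection:
 *
 *	if (dc_dmub_srv_should_detect(dc->ctx->dmub_srv))
 *		schedule_hpd_rx_work(...);	// hypothetical DM-side worker
 *
 * When shared state link detection is supported, the answer comes from the
 * IPS firmware signals without touching registers; otherwise idle is exited
 * first so that the scratch register read is safe.
 */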
void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return;

	allow_idle &= (!dc->debug.ips_disallow_entry);

	if (dc_dmub_srv->idle_allowed == allow_idle)
		return;

	DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);

	/*
	 * Entering a low power state requires a driver notification.
	 * Powering up the hardware requires notifying PMFW and DMCUB.
	 * Clearing the driver idle allow requires a DMCUB command.
	 * DMCUB commands require the DMCUB to be powered up and restored.
	 */

	if (!allow_idle) {
		dc_dmub_srv->idle_exit_counter += 1;

		dc_dmub_srv_exit_low_power_state(dc);
		/*
		 * Idle is considered fully exited only after the sequence above
		 * fully completes. If we have a race of two threads exiting
		 * at the same time then it's safe to perform the sequence
		 * twice as long as we're not re-entering.
		 *
		 * Infinite command submission is avoided by using the
		 * dm_execute_dmub_cmd submission instead of the "wake" helpers.
		 */
		dc_dmub_srv->idle_allowed = false;

		dc_dmub_srv->idle_exit_counter -= 1;
		if (dc_dmub_srv->idle_exit_counter < 0) {
			ASSERT(0);
			dc_dmub_srv->idle_exit_counter = 0;
		}
	} else {
		/* Consider idle as notified prior to the actual submission to
		 * prevent multiple entries.
		 */
		dc_dmub_srv->idle_allowed = true;

		dc_dmub_srv_notify_idle(dc, allow_idle);
	}
}

bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
				  enum dm_dmub_wait_type wait_type)
{
	return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
}

bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
				       union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	bool result = false, reallow_idle = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (count == 0)
		return true;

	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
		reallow_idle = true;
	}

	/*
	 * These may have different implementations in DM, so ensure
	 * that we guide it to the expected helper.
	 */
	if (count > 1)
		result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
	else
		result = dm_execute_dmub_cmd(ctx, cmd, wait_type);

	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
	    !ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);

	return result;
}

static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
				  uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
	enum dmub_status status;

	if (response)
		*response = 0;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
	if (status != DMUB_STATUS_OK) {
		if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
			return true;

		return false;
	}

	if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
		dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);

	return true;
}

bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	bool result = false, reallow_idle = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
		reallow_idle = true;
	}

	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);

	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
	    !ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);

	return result;
}
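/*
 * Illustrative usage (sketch): callers that can race with IPS entry should
 * prefer the wake helpers above over calling dm_execute_dmub_cmd()
 * directly, e.g.:
 *
 *	union dmub_rb_cmd cmd = { 0 };
 *
 *	...fill in the command header and payload...
 *	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 *
 * The helper exits idle before submission and re-allows it afterwards
 * (unless another exit is in flight or reallow is disabled via debug), so
 * the caller does not need to manage idle state itself.
 */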
void dc_dmub_srv_fams2_update_config(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	uint8_t num_cmds = 1;
	uint32_t i;
	union dmub_rb_cmd cmd[2 * MAX_STREAMS + 1];
	struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;

	memset(cmd, 0, sizeof(union dmub_rb_cmd) * (2 * MAX_STREAMS + 1));
	/* fill in generic command header */
	global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
	global_cmd->header.payload_bytes =
			sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);

	if (enable) {
		/* send global configuration parameters */
		memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config,
				sizeof(struct dmub_cmd_fams2_global_config));

		/* copy static feature configuration overrides */
		global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
		global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
		global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;

		/* construct per-stream configs */
		for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
			struct dmub_rb_cmd_fams2 *stream_base_cmd = &cmd[i+1].fams2_config;
			struct dmub_rb_cmd_fams2 *stream_sub_state_cmd =
					&cmd[i+1+context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config;

			/* configure command header */
			stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
			stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
			stream_base_cmd->header.payload_bytes =
					sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
			stream_base_cmd->header.multi_cmd_pending = 1;
			stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
			stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
			stream_sub_state_cmd->header.payload_bytes =
					sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
			stream_sub_state_cmd->header.multi_cmd_pending = 1;
			/* copy stream static base state */
			memcpy(&stream_base_cmd->config,
					&context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
					sizeof(union dmub_cmd_fams2_config));
			/* copy stream static sub state */
			memcpy(&stream_sub_state_cmd->config,
					&context->bw_ctx.bw.dcn.fams2_stream_sub_params[i],
					sizeof(union dmub_cmd_fams2_config));
		}
	}

	/* apply feature configuration based on current driver state */
	global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
	global_cmd->config.global.features.bits.enable = enable;

	if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
		/* set multi pending for global, and unset for last stream cmd */
		global_cmd->header.multi_cmd_pending = 1;
		cmd[2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
		num_cmds += 2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
	}

	dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
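/*
 * Command list layout produced above when FAMS2 is enabled, with
 * N = fams2_global_config.num_streams (informational summary):
 *
 *	cmd[0]		global config		multi_cmd_pending = 1
 *	cmd[1..N]	stream base states	multi_cmd_pending = 1
 *	cmd[N+1..2N]	stream sub states	last entry cleared to 0
 *
 * Clearing the flag on the final command lets DMCUB consume the whole
 * chain as a single atomic update.
 */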
void dc_dmub_srv_fams2_drr_update(struct dc *dc,
		uint32_t tg_inst,
		uint32_t vtotal_min,
		uint32_t vtotal_max,
		uint32_t vtotal_mid,
		uint32_t vtotal_mid_frame_num,
		bool program_manual_trigger)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE;
	cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid;
	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
	cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;

	cmd.fams2_drr_update.header.payload_bytes =
			sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
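/*
 * Illustrative usage (hypothetical values): a simple DRR range update with
 * no mid-frame vtotal switch and no manual trigger programming could look
 * like:
 *
 *	dc_dmub_srv_fams2_drr_update(dc, tg_inst, v_total_min, v_total_max,
 *				     0, 0, false);
 *
 * The vtotal parameters are assumed to be in lines, matching the OPTC
 * state request fields they are copied into.
 */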
void dc_dmub_srv_fams2_passthrough_flip(
		struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *stream,
		struct dc_surface_update *srf_updates,
		int surface_count)
{
	int plane_index;
	union dmub_rb_cmd cmds[MAX_PLANES];
	struct dc_plane_address *address;
	struct dc_plane_state *plane_state;
	int num_cmds = 0;
	struct dc_stream_status *stream_status = dc_stream_get_status(stream);

	if (surface_count <= 0 || stream_status == NULL)
		return;

	memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES);

	/* build command for each surface update */
	for (plane_index = 0; plane_index < surface_count; plane_index++) {
		plane_state = srf_updates[plane_index].surface;
		address = &plane_state->address;

		/* skip if there is no address update for the plane */
		if (!srf_updates[plane_index].flip_addr)
			continue;

		/* build command header */
		cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
		cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
		cmds[num_cmds].fams2_flip.header.payload_bytes =
				sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);

		/* when chaining multiple commands, all but the last command should set this to 1 */
		cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;

		/* set topology info */
		cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state);
		if (stream_status)
			cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst;

		cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate;

		/* build address info for command */
		switch (address->type) {
		case PLN_ADDR_TYPE_GRAPHICS:
			if (address->grph.addr.quad_part == 0) {
				BREAK_TO_DEBUGGER();
				break;
			}

			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
					address->grph.meta_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
					(uint16_t)address->grph.meta_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
					address->grph.addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
					(uint16_t)address->grph.addr.high_part;
			break;
		case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
			if (address->video_progressive.luma_addr.quad_part == 0 ||
					address->video_progressive.chroma_addr.quad_part == 0) {
				BREAK_TO_DEBUGGER();
				break;
			}

			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
					address->video_progressive.luma_meta_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
					(uint16_t)address->video_progressive.luma_meta_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo =
					address->video_progressive.chroma_meta_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi =
					(uint16_t)address->video_progressive.chroma_meta_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
					address->video_progressive.luma_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
					(uint16_t)address->video_progressive.luma_addr.high_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo =
					address->video_progressive.chroma_addr.low_part;
			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi =
					(uint16_t)address->video_progressive.chroma_addr.high_part;
			break;
		default:
			// Should never be hit
			BREAK_TO_DEBUGGER();
			break;
		}

		num_cmds++;
	}

	if (num_cmds > 0) {
		cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0;
		dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
{
	bool result;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
					   start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);

	return result;
}
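/*
 * Illustrative usage (sketch of an assumed measurement flow): bracket the
 * workload under test with start/stop, then read results back with
 * dc_dmub_srv_ips_query_residency_info() below:
 *
 *	dc_dmub_srv_ips_residency_cntl(dc_dmub_srv, true);	// start
 *	...workload under test...
 *	dc_dmub_srv_ips_residency_cntl(dc_dmub_srv, false);	// stop
 */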
void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
{
	uint32_t i;
	enum dmub_gpint_command command_code;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return;

	switch (output->ips_mode) {
	case DMUB_IPS_MODE_IPS1_MAX:
		command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
		break;
	case DMUB_IPS_MODE_IPS2:
		command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
		break;
	case DMUB_IPS_MODE_IPS1_RCG:
		command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
		break;
	case DMUB_IPS_MODE_IPS1_ONO2_ON:
		command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
		break;
	default:
		command_code = DMUB_GPINT__INVALID_COMMAND;
		break;
	}

	if (command_code == DMUB_GPINT__INVALID_COMMAND)
		return;

	// send gpint commands and wait for ack
	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
				       (uint16_t)(output->ips_mode),
				       &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		output->residency_percent = 0;

	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
				       (uint16_t)(output->ips_mode),
				       &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		output->entry_counter = 0;

	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
				       (uint16_t)(output->ips_mode),
				       &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		output->total_active_time_us[0] = 0;
	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
				       (uint16_t)(output->ips_mode),
				       &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		output->total_active_time_us[1] = 0;

	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
				       (uint16_t)(output->ips_mode),
				       &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		output->total_inactive_time_us[0] = 0;
	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
				       (uint16_t)(output->ips_mode),
				       &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		output->total_inactive_time_us[1] = 0;

	// NUM_IPS_HISTOGRAM_BUCKETS = 16
	for (i = 0; i < 16; i++)
		if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
					       DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
			output->histogram[i] = 0;
}
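/*
 * Illustrative usage (hypothetical caller): query IPS2 residency after a
 * measurement window; unsupported modes return early and leave the output
 * untouched:
 *
 *	struct ips_residency_info info = { .ips_mode = DMUB_IPS_MODE_IPS2 };
 *
 *	dc_dmub_srv_ips_query_residency_info(dc_dmub_srv, &info);
 *	// info.residency_percent, info.entry_counter and the 16-bucket
 *	// histogram are now populated (each field zeroed on GPINT failure)
 *
 * The 64-bit active/inactive durations are returned as lo/hi 32-bit halves
 * in total_active_time_us[0]/[1] and total_inactive_time_us[0]/[1].
 */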