1 /* 2 * Copyright 2022 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
21 * 22 * Authors: AMD 23 * 24 */ 25 26 27 #include "dcn35_clk_mgr.h" 28 29 #include "dccg.h" 30 #include "clk_mgr_internal.h" 31 32 // For dce12_get_dp_ref_freq_khz 33 #include "dce100/dce_clk_mgr.h" 34 35 // For dcn20_update_clocks_update_dpp_dto 36 #include "dcn20/dcn20_clk_mgr.h" 37 38 39 #include "reg_helper.h" 40 #include "core_types.h" 41 #include "dcn35_smu.h" 42 #include "dm_helpers.h" 43 44 #include "dcn31/dcn31_clk_mgr.h" 45 46 #include "dc_dmub_srv.h" 47 #include "link_service.h" 48 #include "logger_types.h" 49 50 #undef DC_LOGGER 51 #define DC_LOGGER \ 52 clk_mgr->base.base.ctx->logger 53 54 #define DCN_BASE__INST0_SEG1 0x000000C0 55 #define mmCLK1_CLK_PLL_REQ 0x16E37 56 57 #define mmCLK1_CLK0_DFS_CNTL 0x16E69 58 #define mmCLK1_CLK1_DFS_CNTL 0x16E6C 59 #define mmCLK1_CLK2_DFS_CNTL 0x16E6F 60 #define mmCLK1_CLK3_DFS_CNTL 0x16E72 61 #define mmCLK1_CLK4_DFS_CNTL 0x16E75 62 #define mmCLK1_CLK5_DFS_CNTL 0x16E78 63 64 #define mmCLK1_CLK0_CURRENT_CNT 0x16EFB 65 #define mmCLK1_CLK1_CURRENT_CNT 0x16EFC 66 #define mmCLK1_CLK2_CURRENT_CNT 0x16EFD 67 #define mmCLK1_CLK3_CURRENT_CNT 0x16EFE 68 #define mmCLK1_CLK4_CURRENT_CNT 0x16EFF 69 #define mmCLK1_CLK5_CURRENT_CNT 0x16F00 70 71 #define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A 72 #define mmCLK1_CLK1_BYPASS_CNTL 0x16E93 73 #define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C 74 #define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5 75 #define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE 76 #define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7 77 78 #define mmCLK1_CLK0_DS_CNTL 0x16E83 79 #define mmCLK1_CLK1_DS_CNTL 0x16E8C 80 #define mmCLK1_CLK2_DS_CNTL 0x16E95 81 #define mmCLK1_CLK3_DS_CNTL 0x16E9E 82 #define mmCLK1_CLK4_DS_CNTL 0x16EA7 83 #define mmCLK1_CLK5_DS_CNTL 0x16EB0 84 85 #define mmCLK1_CLK0_ALLOW_DS 0x16E84 86 #define mmCLK1_CLK1_ALLOW_DS 0x16E8D 87 #define mmCLK1_CLK2_ALLOW_DS 0x16E96 88 #define mmCLK1_CLK3_ALLOW_DS 0x16E9F 89 #define mmCLK1_CLK4_ALLOW_DS 0x16EA8 90 #define mmCLK1_CLK5_ALLOW_DS 0x16EB1 91 92 #define mmCLK5_spll_field_8 0x1B24B 93 #define 
mmCLK6_spll_field_8 0x1B24B 94 #define mmDENTIST_DISPCLK_CNTL 0x0124 95 #define regDENTIST_DISPCLK_CNTL 0x0064 96 #define regDENTIST_DISPCLK_CNTL_BASE_IDX 1 97 98 #define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 99 #define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc 100 #define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 101 #define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL 102 #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L 103 #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L 104 105 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L 106 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L 107 // DENTIST_DISPCLK_CNTL 108 #define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0 109 #define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8 110 #define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13 111 #define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14 112 #define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18 113 #define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL 114 #define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L 115 #define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L 116 #define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L 117 #define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L 118 119 #define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L 120 #define CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L 121 122 #define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 123 #undef FN 124 #define FN(reg_name, field_name) \ 125 clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name 126 127 #define REG(reg) \ 128 (clk_mgr->regs->reg) 129 130 #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg 131 132 #define BASE(seg) BASE_INNER(seg) 133 134 #define SR(reg_name)\ 135 .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ 136 reg ## reg_name 137 138 #define CLK_SR_DCN35(reg_name)\ 139 
.reg_name = mm ## reg_name 140 141 static const struct clk_mgr_registers clk_mgr_regs_dcn35 = { 142 CLK_REG_LIST_DCN35() 143 }; 144 145 static const struct clk_mgr_shift clk_mgr_shift_dcn35 = { 146 CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT) 147 }; 148 149 static const struct clk_mgr_mask clk_mgr_mask_dcn35 = { 150 CLK_COMMON_MASK_SH_LIST_DCN32(_MASK) 151 }; 152 153 #define TO_CLK_MGR_DCN35(clk_mgr)\ 154 container_of(clk_mgr, struct clk_mgr_dcn35, base) 155 156 static int dcn35_get_active_display_cnt_wa( 157 struct dc *dc, 158 struct dc_state *context, 159 int *all_active_disps) 160 { 161 int i, display_count = 0; 162 bool tmds_present = false; 163 164 for (i = 0; i < context->stream_count; i++) { 165 const struct dc_stream_state *stream = context->streams[i]; 166 167 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A || 168 stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || 169 stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) 170 tmds_present = true; 171 } 172 173 for (i = 0; i < dc->link_count; i++) { 174 const struct dc_link *link = dc->links[i]; 175 176 /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ 177 if (link->link_enc && link->link_enc->funcs->is_dig_enabled && 178 link->link_enc->funcs->is_dig_enabled(link->link_enc)) 179 display_count++; 180 } 181 if (all_active_disps != NULL) 182 *all_active_disps = display_count; 183 /* WA for hang on HDMI after display off back on*/ 184 if (display_count == 0 && tmds_present) 185 display_count = 1; 186 187 return display_count; 188 } 189 static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, 190 bool safe_to_lower, bool disable) 191 { 192 struct dc *dc = clk_mgr_base->ctx->dc; 193 int i; 194 195 if (dc->ctx->dce_environment == DCE_ENV_DIAG) 196 return; 197 198 for (i = 0; i < dc->res_pool->pipe_count; ++i) { 199 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 200 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 201 struct 
clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base); 202 struct dccg *dccg = clk_mgr_internal->dccg; 203 struct pipe_ctx *pipe = safe_to_lower 204 ? &context->res_ctx.pipe_ctx[i] 205 : &dc->current_state->res_ctx.pipe_ctx[i]; 206 struct link_encoder *new_pipe_link_enc = new_pipe->link_res.dio_link_enc; 207 struct link_encoder *pipe_link_enc = pipe->link_res.dio_link_enc; 208 bool stream_changed_otg_dig_on = false; 209 bool has_active_hpo = false; 210 211 if (pipe->top_pipe || pipe->prev_odm_pipe) 212 continue; 213 214 if (!dc->config.unify_link_enc_assignment) { 215 if (new_pipe->stream) 216 new_pipe_link_enc = new_pipe->stream->link_enc; 217 if (pipe->stream) 218 pipe_link_enc = pipe->stream->link_enc; 219 } 220 221 stream_changed_otg_dig_on = old_pipe->stream && new_pipe->stream && 222 old_pipe->stream != new_pipe->stream && 223 old_pipe->stream_res.tg == new_pipe->stream_res.tg && 224 new_pipe_link_enc && !new_pipe->stream->dpms_off && 225 new_pipe_link_enc->funcs->is_dig_enabled && 226 new_pipe_link_enc->funcs->is_dig_enabled( 227 new_pipe_link_enc) && 228 new_pipe->stream_res.stream_enc && 229 new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled && 230 new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc); 231 232 if (old_pipe->stream && new_pipe->stream && old_pipe->stream == new_pipe->stream) { 233 has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) && 234 dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe); 235 236 } 237 238 if (!has_active_hpo && !stream_changed_otg_dig_on && pipe->stream && 239 (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || !pipe_link_enc) && 240 !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe)) { 241 /* This w/a should not trigger when we have a dig active */ 242 if (disable) { 243 if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) 244 
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); 245 246 reset_sync_context_for_pipe(dc, context, i); 247 } else { 248 pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); 249 } 250 } 251 } 252 } 253 254 static void dcn35_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, 255 struct dc_state *context, 256 int ref_dtbclk_khz) 257 { 258 struct dccg *dccg = clk_mgr->dccg; 259 uint32_t tg_mask = 0; 260 int i; 261 262 for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { 263 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 264 struct dtbclk_dto_params dto_params = {0}; 265 266 /* use mask to program DTO once per tg */ 267 if (pipe_ctx->stream_res.tg && 268 !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) { 269 tg_mask |= (1 << pipe_ctx->stream_res.tg->inst); 270 271 dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; 272 dto_params.ref_dtbclk_khz = ref_dtbclk_khz; 273 274 dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params); 275 //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params); 276 } 277 } 278 } 279 280 static void dcn35_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, 281 struct dc_state *context, bool safe_to_lower) 282 { 283 int i; 284 bool dppclk_active[MAX_PIPES] = {0}; 285 286 287 clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; 288 for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { 289 int dpp_inst = 0, dppclk_khz, prev_dppclk_khz; 290 291 dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; 292 293 if (context->res_ctx.pipe_ctx[i].plane_res.dpp) 294 dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; 295 else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) { 296 /* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting. 
297 * In this case just continue in loop 298 */ 299 continue; 300 } else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) { 301 /* The software state is not valid if dpp resource is NULL and 302 * dppclk_khz > 0. 303 */ 304 ASSERT(false); 305 continue; 306 } 307 308 prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i]; 309 310 if (safe_to_lower || prev_dppclk_khz < dppclk_khz) 311 clk_mgr->dccg->funcs->update_dpp_dto( 312 clk_mgr->dccg, dpp_inst, dppclk_khz); 313 dppclk_active[dpp_inst] = true; 314 } 315 if (safe_to_lower) 316 for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { 317 struct dpp *old_dpp = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.dpp; 318 319 if (old_dpp && !dppclk_active[old_dpp->inst]) 320 clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, old_dpp->inst, 0); 321 } 322 } 323 324 static uint8_t get_lowest_dpia_index(const struct dc_link *link) 325 { 326 const struct dc *dc_struct = link->dc; 327 uint8_t idx = 0xFF; 328 int i; 329 330 for (i = 0; i < MAX_PIPES * 2; ++i) { 331 if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) 332 continue; 333 334 if (idx > dc_struct->links[i]->link_index) 335 idx = dc_struct->links[i]->link_index; 336 } 337 338 return idx; 339 } 340 341 static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_state *context, 342 bool safe_to_lower) 343 { 344 struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; 345 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 346 uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 }; 347 int i; 348 for (i = 0; i < context->stream_count; ++i) { 349 const struct dc_stream_state *stream = context->streams[i]; 350 const struct dc_link *link = stream->link; 351 uint8_t lowest_dpia_index = 0; 352 unsigned int hr_index = 0; 353 354 if (!link) 355 continue; 356 357 lowest_dpia_index = get_lowest_dpia_index(link); 358 if (link->link_index < 
lowest_dpia_index) 359 continue; 360 361 hr_index = (link->link_index - lowest_dpia_index) / 2; 362 if (hr_index >= MAX_HOST_ROUTERS_NUM) 363 continue; 364 host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing( 365 &stream->timing, dc_link_get_highest_encoding_format(link)); 366 } 367 368 for (i = 0; i < MAX_HOST_ROUTERS_NUM; ++i) { 369 new_clocks->host_router_bw_kbps[i] = host_router_bw_kbps[i]; 370 if (should_set_clock(safe_to_lower, new_clocks->host_router_bw_kbps[i], clk_mgr_base->clks.host_router_bw_kbps[i])) { 371 clk_mgr_base->clks.host_router_bw_kbps[i] = new_clocks->host_router_bw_kbps[i]; 372 dcn35_smu_notify_host_router_bw(clk_mgr, i, new_clocks->host_router_bw_kbps[i]); 373 } 374 } 375 } 376 377 void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, 378 struct dc_state *context, 379 bool safe_to_lower) 380 { 381 union dmub_rb_cmd cmd; 382 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 383 struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; 384 struct dc *dc = clk_mgr_base->ctx->dc; 385 int display_count = 0; 386 bool update_dppclk = false; 387 bool update_dispclk = false; 388 bool dpp_clock_lowered = false; 389 int all_active_disps = 0; 390 391 if (dc->work_arounds.skip_clock_update) 392 return; 393 394 display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps); 395 if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz) 396 new_clocks->ref_dtbclk_khz = 600000; 397 else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000) 398 new_clocks->ref_dtbclk_khz = 0; 399 400 /* 401 * if it is safe to lower, but we are already in the lower state, we don't have to do anything 402 * also if safe to lower is false, we just go in the higher state 403 */ 404 if (safe_to_lower) { 405 if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW && 406 new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { 407 dcn35_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support); 408 
dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true); 409 clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; 410 } 411 412 if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { 413 if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk) 414 dcn35_smu_set_dtbclk(clk_mgr, false); 415 416 clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; 417 } 418 /* check that we're not already in lower */ 419 if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { 420 /* if we can go lower, go lower */ 421 if (display_count == 0) 422 clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; 423 } 424 } else { 425 if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW && 426 new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { 427 dcn35_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW); 428 dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false); 429 clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; 430 } 431 432 if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { 433 int actual_dtbclk = 0; 434 435 dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz); 436 dcn35_smu_set_dtbclk(clk_mgr, true); 437 438 actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT); 439 440 if (actual_dtbclk > 590000) { 441 clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz; 442 clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; 443 } 444 } 445 446 /* check that we're not already in D0 */ 447 if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { 448 union display_idle_optimization_u idle_info = { 0 }; 449 450 dcn35_smu_set_display_idle_optimization(clk_mgr, idle_info.data); 451 /* update power state */ 452 clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE; 453 } 454 } 455 if (dc->debug.force_min_dcfclk_mhz > 0) 456 new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ? 
457 new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000); 458 459 if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) { 460 clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz; 461 dcn35_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz); 462 } 463 464 if (should_set_clock(safe_to_lower, 465 new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) { 466 clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; 467 dcn35_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz); 468 } 469 470 // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. 471 if (new_clocks->dppclk_khz < 100000) 472 new_clocks->dppclk_khz = 100000; 473 474 if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { 475 if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) 476 dpp_clock_lowered = true; 477 clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; 478 update_dppclk = true; 479 } 480 481 if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && 482 (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { 483 int requested_dispclk_khz = new_clocks->dispclk_khz; 484 485 dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); 486 487 /* Clamp the requested clock to PMFW based on their limit. 
*/ 488 if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) 489 requested_dispclk_khz = dc->debug.min_disp_clk_khz; 490 491 dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz); 492 clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; 493 494 dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); 495 496 update_dispclk = true; 497 } 498 499 /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */ 500 if (!dc->debug.disable_dtb_ref_clk_switch && 501 should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, 502 clk_mgr_base->clks.ref_dtbclk_khz / 1000)) { 503 dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz); 504 clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz; 505 } 506 507 if (dpp_clock_lowered) { 508 // increase per DPP DTO before lowering global dppclk 509 dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 510 dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); 511 } else { 512 // increase global DPPCLK before lowering per DPP DTO 513 if (update_dppclk || update_dispclk) 514 dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); 515 dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 516 } 517 518 // notify PMFW of bandwidth per DPIA tunnel 519 if (dc->debug.notify_dpia_hr_bw) 520 dcn35_notify_host_router_bw(clk_mgr_base, context, safe_to_lower); 521 522 // notify DMCUB of latest clocks 523 memset(&cmd, 0, sizeof(cmd)); 524 cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR; 525 cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS; 526 cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz; 527 cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz = 528 clk_mgr_base->clks.dcfclk_deep_sleep_khz; 529 cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; 530 cmd.notify_clocks.clocks.dppclk_khz = 
clk_mgr_base->clks.dppclk_khz; 531 532 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 533 } 534 535 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr) 536 { 537 /* get FbMult value */ 538 struct fixed31_32 pll_req; 539 unsigned int fbmult_frac_val = 0; 540 unsigned int fbmult_int_val = 0; 541 542 /* 543 * Register value of fbmult is in 8.16 format, we are converting to 314.32 544 * to leverage the fix point operations available in driver 545 */ 546 547 REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/ 548 REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */ 549 550 pll_req = dc_fixpt_from_int(fbmult_int_val); 551 552 /* 553 * since fractional part is only 16 bit in register definition but is 32 bit 554 * in our fix point definiton, need to shift left by 16 to obtain correct value 555 */ 556 pll_req.value |= fbmult_frac_val << 16; 557 558 /* multiply by REFCLK period */ 559 pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz); 560 561 /* integer part is now VCO frequency in kHz */ 562 return dc_fixpt_floor(pll_req); 563 } 564 565 static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base) 566 { 567 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 568 569 dcn35_smu_enable_pme_wa(clk_mgr); 570 } 571 572 573 bool dcn35_are_clock_states_equal(struct dc_clocks *a, 574 struct dc_clocks *b) 575 { 576 if (a->dispclk_khz != b->dispclk_khz) 577 return false; 578 else if (a->dppclk_khz != b->dppclk_khz) 579 return false; 580 else if (a->dcfclk_khz != b->dcfclk_khz) 581 return false; 582 else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) 583 return false; 584 else if (a->zstate_support != b->zstate_support) 585 return false; 586 else if (a->dtbclk_en != b->dtbclk_en) 587 return false; 588 589 return true; 590 } 591 592 static void dcn35_save_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr 
*clk_mgr_base) 593 { 594 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 595 596 // read dtbclk 597 internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT); 598 internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL); 599 600 // read dcfclk 601 internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT); 602 internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL); 603 604 // read dcf deep sleep divider 605 internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL); 606 internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS); 607 608 // read dppclk 609 internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT); 610 internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL); 611 612 // read dprefclk 613 internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT); 614 internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL); 615 616 // read dispclk 617 internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT); 618 internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL); 619 } 620 621 static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, 622 struct clk_mgr_dcn35 *clk_mgr) 623 { 624 struct dcn35_clk_internal internal = {0}; 625 char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"}; 626 627 dcn35_save_clk_registers_internal(&internal, &clk_mgr->base.base); 628 629 regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10; 630 regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10; 631 regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS; 632 regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10; 633 regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10; 634 regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10; 635 regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; 636 637 regs_and_bypass->dppclk_bypass = 
internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; 638 if (regs_and_bypass->dppclk_bypass > 4) 639 regs_and_bypass->dppclk_bypass = 0; 640 regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; 641 if (regs_and_bypass->dcfclk_bypass > 4) 642 regs_and_bypass->dcfclk_bypass = 0; 643 regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; 644 if (regs_and_bypass->dispclk_bypass > 4) 645 regs_and_bypass->dispclk_bypass = 0; 646 regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; 647 if (regs_and_bypass->dprefclk_bypass > 4) 648 regs_and_bypass->dprefclk_bypass = 0; 649 650 if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { 651 DC_LOG_SMU("clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n"); 652 653 DC_LOG_SMU("dcfclk,%d,%d,%d,%s\n", 654 regs_and_bypass->dcfclk, 655 regs_and_bypass->dcf_deep_sleep_divider, 656 regs_and_bypass->dcf_deep_sleep_allow, 657 bypass_clks[(int) regs_and_bypass->dcfclk_bypass]); 658 659 DC_LOG_SMU("dprefclk,%d,N/A,N/A,%s\n", 660 regs_and_bypass->dprefclk, 661 bypass_clks[(int) regs_and_bypass->dprefclk_bypass]); 662 663 DC_LOG_SMU("dispclk,%d,N/A,N/A,%s\n", 664 regs_and_bypass->dispclk, 665 bypass_clks[(int) regs_and_bypass->dispclk_bypass]); 666 667 // REGISTER VALUES 668 DC_LOG_SMU("reg_name,value,clk_type"); 669 670 DC_LOG_SMU("CLK1_CLK3_CURRENT_CNT,%d,dcfclk", 671 internal.CLK1_CLK3_CURRENT_CNT); 672 673 DC_LOG_SMU("CLK1_CLK4_CURRENT_CNT,%d,dtbclk", 674 internal.CLK1_CLK4_CURRENT_CNT); 675 676 DC_LOG_SMU("CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider", 677 internal.CLK1_CLK3_DS_CNTL); 678 679 DC_LOG_SMU("CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow", 680 internal.CLK1_CLK3_ALLOW_DS); 681 682 DC_LOG_SMU("CLK1_CLK2_CURRENT_CNT,%d,dprefclk", 683 internal.CLK1_CLK2_CURRENT_CNT); 684 685 DC_LOG_SMU("CLK1_CLK0_CURRENT_CNT,%d,dispclk", 686 internal.CLK1_CLK0_CURRENT_CNT); 687 688 DC_LOG_SMU("CLK1_CLK1_CURRENT_CNT,%d,dppclk", 689 internal.CLK1_CLK1_CURRENT_CNT); 690 691 
DC_LOG_SMU("CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass", 692 internal.CLK1_CLK3_BYPASS_CNTL); 693 694 DC_LOG_SMU("CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass", 695 internal.CLK1_CLK2_BYPASS_CNTL); 696 697 DC_LOG_SMU("CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass", 698 internal.CLK1_CLK0_BYPASS_CNTL); 699 700 DC_LOG_SMU("CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass", 701 internal.CLK1_CLK1_BYPASS_CNTL); 702 703 } 704 } 705 706 static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) 707 { 708 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 709 710 uint32_t ssc_enable; 711 712 if (clk_mgr_base->ctx->dce_version == DCN_VERSION_3_51) { 713 ssc_enable = REG_READ(CLK6_spll_field_8) & CLK6_spll_field_8__spll_ssc_en_MASK; 714 } else { 715 ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK; 716 } 717 718 return ssc_enable != 0; 719 } 720 721 static void init_clk_states(struct clk_mgr *clk_mgr) 722 { 723 uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; 724 725 memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); 726 727 clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk 728 clk_mgr->clks.p_state_change_support = true; 729 clk_mgr->clks.prev_p_state_change_support = true; 730 clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; 731 clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; 732 } 733 734 void dcn35_init_clocks(struct clk_mgr *clk_mgr) 735 { 736 struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); 737 struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr_int); 738 739 init_clk_states(clk_mgr); 740 741 // to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk 742 if (dcn35_is_spll_ssc_enabled(clk_mgr)) 743 clk_mgr->dp_dto_source_clock_in_khz = 744 dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz); 745 else 746 clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz; 747 748 dcn35_save_clk_registers(&clk_mgr->boot_snapshot, clk_mgr_dcn35); 
749 750 clk_mgr->clks.ref_dtbclk_khz = clk_mgr->boot_snapshot.dtbclk * 10; 751 if (clk_mgr->boot_snapshot.dtbclk > 59000) { 752 /*dtbclk enabled based on */ 753 clk_mgr->clks.dtbclk_en = true; 754 } 755 } 756 static struct clk_bw_params dcn35_bw_params = { 757 .vram_type = Ddr4MemType, 758 .num_channels = 1, 759 .clk_table = { 760 .num_entries = 4, 761 }, 762 763 }; 764 765 static struct wm_table ddr5_wm_table = { 766 .entries = { 767 { 768 .wm_inst = WM_A, 769 .wm_type = WM_TYPE_PSTATE_CHG, 770 .pstate_latency_us = 11.72, 771 .sr_exit_time_us = 28.0, 772 .sr_enter_plus_exit_time_us = 30.0, 773 .valid = true, 774 }, 775 { 776 .wm_inst = WM_B, 777 .wm_type = WM_TYPE_PSTATE_CHG, 778 .pstate_latency_us = 11.72, 779 .sr_exit_time_us = 28.0, 780 .sr_enter_plus_exit_time_us = 30.0, 781 .valid = true, 782 }, 783 { 784 .wm_inst = WM_C, 785 .wm_type = WM_TYPE_PSTATE_CHG, 786 .pstate_latency_us = 11.72, 787 .sr_exit_time_us = 28.0, 788 .sr_enter_plus_exit_time_us = 30.0, 789 .valid = true, 790 }, 791 { 792 .wm_inst = WM_D, 793 .wm_type = WM_TYPE_PSTATE_CHG, 794 .pstate_latency_us = 11.72, 795 .sr_exit_time_us = 28.0, 796 .sr_enter_plus_exit_time_us = 30.0, 797 .valid = true, 798 }, 799 } 800 }; 801 802 static struct wm_table lpddr5_wm_table = { 803 .entries = { 804 { 805 .wm_inst = WM_A, 806 .wm_type = WM_TYPE_PSTATE_CHG, 807 .pstate_latency_us = 11.65333, 808 .sr_exit_time_us = 28.0, 809 .sr_enter_plus_exit_time_us = 30.0, 810 .valid = true, 811 }, 812 { 813 .wm_inst = WM_B, 814 .wm_type = WM_TYPE_PSTATE_CHG, 815 .pstate_latency_us = 11.65333, 816 .sr_exit_time_us = 28.0, 817 .sr_enter_plus_exit_time_us = 30.0, 818 .valid = true, 819 }, 820 { 821 .wm_inst = WM_C, 822 .wm_type = WM_TYPE_PSTATE_CHG, 823 .pstate_latency_us = 11.65333, 824 .sr_exit_time_us = 28.0, 825 .sr_enter_plus_exit_time_us = 30.0, 826 .valid = true, 827 }, 828 { 829 .wm_inst = WM_D, 830 .wm_type = WM_TYPE_PSTATE_CHG, 831 .pstate_latency_us = 11.65333, 832 .sr_exit_time_us = 28.0, 833 
.sr_enter_plus_exit_time_us = 30.0, 834 .valid = true, 835 }, 836 } 837 }; 838 839 static DpmClocks_t_dcn35 dummy_clocks; 840 static DpmClocks_t_dcn351 dummy_clocks_dcn351; 841 842 static struct dcn35_watermarks dummy_wms = { 0 }; 843 844 static struct dcn35_ss_info_table ss_info_table = { 845 .ss_divider = 1000, 846 .ss_percentage = {0, 0, 375, 375, 375} 847 }; 848 849 static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) 850 { 851 uint32_t clock_source = 0; 852 853 clock_source = REG_READ(CLK1_CLK2_BYPASS_CNTL) & CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK; 854 855 // If it's DFS mode, clock_source is 0. 856 if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) { 857 clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; 858 859 if (clk_mgr->dprefclk_ss_percentage != 0) { 860 clk_mgr->ss_on_dprefclk = true; 861 clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider; 862 } 863 } 864 } 865 866 static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table) 867 { 868 int i, num_valid_sets; 869 870 num_valid_sets = 0; 871 872 for (i = 0; i < WM_SET_COUNT; i++) { 873 /* skip empty entries, the smu array has no holes*/ 874 if (!bw_params->wm_table.entries[i].valid) 875 continue; 876 877 table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst; 878 table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type; 879 /* We will not select WM based on fclk, so leave it as unconstrained */ 880 table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0; 881 table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF; 882 883 if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) { 884 if (i == 0) 885 table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0; 886 else { 887 /* add 1 to make it non-overlapping with next lvl */ 888 
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
						bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
			}
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
					bw_params->clk_table.entries[i].dcfclk_mhz;

		} else {
			/* unconstrained for memory retraining */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

			/* Modify previous watermark range to cover up to max */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
		}
		num_valid_sets++;
	}

	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */

	/* modify the min and max to make sure we cover the whole range*/
	table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;

	/* This is for writeback only, does not matter currently as no writeback support*/
	table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
	table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
	table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}

/*
 * Build the watermark ranges and hand them to the SMU through the shared
 * buffer.  No-op when the SMU is absent (smu_ver == 0) or the buffer
 * allocation failed at construct time (mc_address == 0 => dummy table).
 */
static void dcn35_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr);
	struct dcn35_watermarks *table = clk_mgr_dcn35->smu_wm_set.wm_set;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || clk_mgr_dcn35->smu_wm_set.mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn35_build_watermark_ranges(clk_mgr_base->bw_params, table);

	dcn35_smu_set_dram_addr_high(clk_mgr,
			clk_mgr_dcn35->smu_wm_set.mc_address.high_part);
	dcn35_smu_set_dram_addr_low(clk_mgr,
			clk_mgr_dcn35->smu_wm_set.mc_address.low_part);
	dcn35_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}

/*
 * Ask the SMU to dump the DCN3.5 DPM clock table into the shared buffer.
 * No-op when the SMU is absent or the buffer allocation failed.
 */
static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
		struct dcn35_smu_dpm_clks *smu_dpm_clks)
{
	DpmClocks_t_dcn35 *table = smu_dpm_clks->dpm_clks;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || smu_dpm_clks->mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn35_smu_set_dram_addr_high(clk_mgr,
			smu_dpm_clks->mc_address.high_part);
	dcn35_smu_set_dram_addr_low(clk_mgr,
			smu_dpm_clks->mc_address.low_part);
	dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}

/* DCN3.51 variant: same SMU handshake, fills the dcn351 table layout. */
static void dcn351_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
		struct dcn351_smu_dpm_clks *smu_dpm_clks)
{
	DpmClocks_t_dcn351 *table = smu_dpm_clks->dpm_clks;

	if (!clk_mgr->smu_ver)
		return;
	if (!table || smu_dpm_clks->mc_address.quad_part == 0)
		return;
	memset(table, 0, sizeof(*table));
	dcn35_smu_set_dram_addr_high(clk_mgr,
			smu_dpm_clks->mc_address.high_part);
	dcn35_smu_set_dram_addr_low(clk_mgr,
			smu_dpm_clks->mc_address.low_part);
	dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}

/* Return the largest value in clocks[0..num_clocks), or 0 if none. */
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
	uint32_t max = 0;
	int i;

	for (i = 0; i < num_clocks; ++i) {
		if (clocks[i] > max)
			max = clocks[i];
	}

	return max;
}

/* Plausibility band for clock readings: rejects 0/1 and values >= 100000. */
static inline bool is_valid_clock_value(uint32_t clock_value)
{
	return clock_value > 1 && clock_value < 100000;
}

/* Map the PMFW WCK ratio enum to a numeric multiplier; unknown => 1:1. */
static unsigned int convert_wck_ratio(uint8_t wck_ratio)
{
	switch (wck_ratio) {
	case WCK_RATIO_1_2:
		return 2;

	case WCK_RATIO_1_4:
		return 4;

	default:
		break;
	}

	return 1;
}

/* Effective DRAM speed in MT/s: UClk scaled by the WCK ratio, x2 for DDR. */
static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
{
	return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
}

/*
 * Build clk_bw_params from the PMFW DPM table and VBIOS integrated info.
 * The clock table is keyed off DCFCLK levels; dispclk/dppclk are pinned at
 * their maxima for every entry, and an extra entry is appended for the
 * highest memory p-state when it differs from the lowest.
 */
static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
		struct integrated_info *bios_info,
		DpmClocks_t_dcn35 *clock_table)
{
	struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
	/* snapshot of the previous top entry, used as fallback defaults below */
	struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
	uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
	uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
	uint32_t num_memps, num_fclk, num_dcfclk;
	int i;

	/* Determine min/max p-state values. */
	num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
			clock_table->NumMemPstatesEnabled;
	for (i = 0; i < num_memps; i++) {
		uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);

		if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
			max_dram_speed_mts = dram_speed_mts;
			max_pstate = i;
		}
	}

	min_dram_speed_mts = max_dram_speed_mts;
	min_pstate = max_pstate;

	for (i = 0; i < num_memps; i++) {
		uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);

		if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
			min_dram_speed_mts = dram_speed_mts;
			min_pstate = i;
		}
	}

	/* We expect the table to contain at least one valid P-state entry.
	 */
	ASSERT(clock_table->NumMemPstatesEnabled &&
			is_valid_clock_value(max_dram_speed_mts) &&
			is_valid_clock_value(min_dram_speed_mts));

	/* dispclk and dppclk can be max at any voltage, same number of levels for both */
	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
			clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
		max_dispclk = find_max_clk_value(clock_table->DispClocks,
				clock_table->NumDispClkLevelsEnabled);
		max_dppclk = find_max_clk_value(clock_table->DppClocks,
				clock_table->NumDispClkLevelsEnabled);
	} else {
		/* Invalid number of entries in the table from PMFW. */
		ASSERT(0);
	}

	/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
	ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);

	num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
			clock_table->NumFclkLevelsEnabled;
	max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);

	num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
			clock_table->NumDcfClkLevelsEnabled;
	for (i = 0; i < num_dcfclk; i++) {
		int j;

		/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
		for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
			if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
				break;

		bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
		bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
		bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;

		/* Now update clocks we do read */
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
		bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
		bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
		bw_params->clk_table.entries[i].wck_ratio =
				convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);

		/* Dcfclk and Fclk are tied, but at a different ratio */
		bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
	}

	/* Make sure to include at least one entry at highest pstate */
	if (max_pstate != min_pstate || i == 0) {
		if (i > MAX_NUM_DPM_LVL - 1)
			i = MAX_NUM_DPM_LVL - 1;

		bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;
		bw_params->clk_table.entries[i].dcfclk_mhz =
				find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
		bw_params->clk_table.entries[i].socclk_mhz =
				find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
		bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
				clock_table->MemPstateTable[max_pstate].WckRatio);
		i++;
	}
	/* Post-decrement on purpose: num_entries becomes i, then i indexes the
	 * last (highest) entry for the fix-ups below.
	 */
	bw_params->clk_table.num_entries = i--;

	/* Make sure all highest clocks are included*/
	bw_params->clk_table.entries[i].socclk_mhz =
			find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
	bw_params->clk_table.entries[i].dispclk_mhz =
			find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
	bw_params->clk_table.entries[i].dppclk_mhz =
			find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
	bw_params->clk_table.entries[i].fclk_mhz =
			find_max_clk_value(clock_table->FclkClocks_Freq, NUM_FCLK_DPM_LEVELS);
	ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
	bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
	bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
	bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
	bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = clock_table->NumDcfClkLevelsEnabled;
	bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = clock_table->NumDispClkLevelsEnabled;
	/* dppclk level count mirrors dispclk — both come from NumDispClkLevelsEnabled */
	bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = clock_table->NumDispClkLevelsEnabled;
	bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
	bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
	bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;

	/*
	 * Set any 0 clocks to max default setting. Not an issue for
	 * power since we aren't doing switching in such case anyway
	 */
	for (i = 0; i < bw_params->clk_table.num_entries; i++) {
		if (!bw_params->clk_table.entries[i].fclk_mhz) {
			bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
			bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
			bw_params->clk_table.entries[i].voltage = def_max.voltage;
		}
		if (!bw_params->clk_table.entries[i].dcfclk_mhz)
			bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
		if (!bw_params->clk_table.entries[i].socclk_mhz)
			bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
		if (!bw_params->clk_table.entries[i].dispclk_mhz)
			bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
		if (!bw_params->clk_table.entries[i].dppclk_mhz)
			bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
		/* NOTE(review): fclk_mhz was already defaulted at the top of this
		 * loop iteration, so this re-check can never fire — redundant. */
		if (!bw_params->clk_table.entries[i].fclk_mhz)
			bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
		if (!bw_params->clk_table.entries[i].phyclk_mhz)
			bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
		if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
			bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
		if (!bw_params->clk_table.entries[i].dtbclk_mhz)
			bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
	}
	ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
	bw_params->vram_type = bios_info->memory_type;
	/* 0x22 — presumably LpDdr5MemType: wider (8-byte) channels; TODO confirm */
	bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
	bw_params->num_channels = bios_info->ma_channel_number ?
			bios_info->ma_channel_number : 4;

	/* One PSTATE_CHG watermark set per clock-table entry; rest marked invalid. */
	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		if (i >= bw_params->clk_table.num_entries) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}

		bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
		bw_params->wm_table.entries[i].valid = true;
	}
}

/* Drop into low-power state when no displays are active. */
static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
	int display_count;
	struct dc *dc = clk_mgr_base->ctx->dc;
	struct dc_state *context = dc->current_state;

	if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
		display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL);
		/* if we can go lower, go lower */
		if (display_count == 0)
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
	}
}

/* Ask the SMU to leave the low-power state. */
static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	//SMU optimization is performed part of low power state exit.
	dcn35_smu_exit_low_power_state(clk_mgr);

}

/* Query PMFW whether idle power states (IPS) are supported. */
static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	return dcn35_smu_get_ips_supported(clk_mgr) ?
			true : false;
}

static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
	init_clk_states(clk_mgr);

	/* TODO: Implement the functions and remove the ifndef guard */
}

/*
 * FPGA/scemi variant of update_clocks.  Several clocks are forced to fixed
 * values (dcfclk/socclk 400 MHz, fclk 4.32 GHz) until DML sets them.
 */
static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr,
		struct dc_state *context,
		bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	/* capture the requested fclk before it is overridden below */
	int fclk_adj = new_clocks->fclk_khz;

	/* TODO: remove this after correctly set by DML */
	new_clocks->dcfclk_khz = 400000;
	new_clocks->socclk_khz = 400000;

	/* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
	//int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
	new_clocks->fclk_khz = 4320000;

	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
		clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
		clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
	}

	if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr->clks.socclk_khz)) {
		clk_mgr->clks.socclk_khz = new_clocks->socclk_khz;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr->clks.dramclk_khz)) {
		clk_mgr->clks.dramclk_khz = new_clocks->dramclk_khz;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
		clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
	}

	if (should_set_clock(safe_to_lower, fclk_adj, clk_mgr->clks.fclk_khz)) {
		clk_mgr->clks.fclk_khz = fclk_adj;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
		clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
	}

	/* Both fclk and ref_dppclk run on the same scemi clock.
	 * So take the higher value since the DPP DTO is typically programmed
	 * such that max dppclk is 1:1 with ref_dppclk.
	 */
	if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
		clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
	if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
		clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;

	// Both fclk and ref_dppclk run on the same scemi clock.
	clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;

	/* TODO: set dtbclk in correct place */
	clk_mgr->clks.dtbclk_en = true;
	dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
	dcn35_update_clocks_update_dpp_dto(clk_mgr_int, context, safe_to_lower);

	dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
}

/*
 * Report the maximum attainable frequency (kHz) for the requested clock type
 * from the populated clock table, falling back to the boot snapshot when the
 * table has no levels.  DSCCLK is derived as dispclk / 3.  Returns 0 for
 * unsupported clock types.
 */
static unsigned int dcn35_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	unsigned int num_clk_levels;

	switch (clk_type) {
	case CLK_TYPE_DISPCLK:
		num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
		return num_clk_levels ?
				clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
				clk_mgr->base.boot_snapshot.dispclk;
	case CLK_TYPE_DPPCLK:
		num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
		return num_clk_levels ?
				clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
				clk_mgr->base.boot_snapshot.dppclk;
	case CLK_TYPE_DSCCLK:
		num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
		return num_clk_levels ?
				clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
				clk_mgr->base.boot_snapshot.dispclk / 3;
	default:
		break;
	}

	return 0;
}

/* Clock-manager vtable for silicon. */
static struct clk_mgr_funcs dcn35_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
	.update_clocks = dcn35_update_clocks,
	.init_clocks = dcn35_init_clocks,
	.enable_pme_wa = dcn35_enable_pme_wa,
	.are_clock_states_equal = dcn35_are_clock_states_equal,
	.notify_wm_ranges = dcn35_notify_wm_ranges,
	.set_low_power_state = dcn35_set_low_power_state,
	.exit_low_power_state = dcn35_exit_low_power_state,
	.is_ips_supported = dcn35_is_ips_supported,
	.get_max_clock_khz = dcn35_get_max_clock_khz,
};

/* Reduced vtable for FPGA/scemi platforms. */
struct clk_mgr_funcs dcn35_fpga_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn35_update_clocks_fpga,
	.init_clocks = dcn35_init_clocks_fpga,
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};

/*
 * Copy only the clock tables this driver consumes from the dcn351 SMU
 * layout into the dcn35 layout so the common dcn35 path can be shared.
 */
static void translate_to_DpmClocks_t_dcn35(struct dcn351_smu_dpm_clks *smu_dpm_clks_a,
		struct dcn35_smu_dpm_clks *smu_dpm_clks_b)
{
	/*translate two structures and only take need clock tables*/
	uint8_t i;

	if (smu_dpm_clks_a == NULL || smu_dpm_clks_b == NULL ||
		smu_dpm_clks_a->dpm_clks == NULL || smu_dpm_clks_b->dpm_clks == NULL)
		return;

	for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++)
		smu_dpm_clks_b->dpm_clks->DcfClocks[i] = smu_dpm_clks_a->dpm_clks->DcfClocks[i];

	for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++)
		smu_dpm_clks_b->dpm_clks->DispClocks[i] = smu_dpm_clks_a->dpm_clks->DispClocks[i];

	for (i = 0; i < NUM_DPPCLK_DPM_LEVELS; i++)
		smu_dpm_clks_b->dpm_clks->DppClocks[i] = smu_dpm_clks_a->dpm_clks->DppClocks[i];

	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
		smu_dpm_clks_b->dpm_clks->FclkClocks_Freq[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Freq[i];
		smu_dpm_clks_b->dpm_clks->FclkClocks_Voltage[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Voltage[i];
	}
	for (i = 0; i < NUM_MEM_PSTATE_LEVELS; i++) {
		smu_dpm_clks_b->dpm_clks->MemPstateTable[i].MemClk =
				smu_dpm_clks_a->dpm_clks->MemPstateTable[i].MemClk;
		smu_dpm_clks_b->dpm_clks->MemPstateTable[i].UClk =
				smu_dpm_clks_a->dpm_clks->MemPstateTable[i].UClk;
		smu_dpm_clks_b->dpm_clks->MemPstateTable[i].Voltage =
				smu_dpm_clks_a->dpm_clks->MemPstateTable[i].Voltage;
		smu_dpm_clks_b->dpm_clks->MemPstateTable[i].WckRatio =
				smu_dpm_clks_a->dpm_clks->MemPstateTable[i].WckRatio;
	}
	smu_dpm_clks_b->dpm_clks->MaxGfxClk = smu_dpm_clks_a->dpm_clks->MaxGfxClk;
	smu_dpm_clks_b->dpm_clks->MinGfxClk = smu_dpm_clks_a->dpm_clks->MinGfxClk;
	smu_dpm_clks_b->dpm_clks->NumDcfClkLevelsEnabled =
			smu_dpm_clks_a->dpm_clks->NumDcfClkLevelsEnabled;
	smu_dpm_clks_b->dpm_clks->NumDispClkLevelsEnabled =
			smu_dpm_clks_a->dpm_clks->NumDispClkLevelsEnabled;
	smu_dpm_clks_b->dpm_clks->NumFclkLevelsEnabled =
			smu_dpm_clks_a->dpm_clks->NumFclkLevelsEnabled;
	smu_dpm_clks_b->dpm_clks->NumMemPstatesEnabled =
			smu_dpm_clks_a->dpm_clks->NumMemPstatesEnabled;
	smu_dpm_clks_b->dpm_clks->NumSocClkLevelsEnabled =
			smu_dpm_clks_a->dpm_clks->NumSocClkLevelsEnabled;

	for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
		smu_dpm_clks_b->dpm_clks->SocClocks[i] = smu_dpm_clks_a->dpm_clks->SocClocks[i];
		smu_dpm_clks_b->dpm_clks->SocVoltage[i] = smu_dpm_clks_a->dpm_clks->SocVoltage[i];
	}
}

/*
 * One-time construction of the DCN3.5/3.51 clock manager: wires up register
 * maps, allocates the SMU shared (GART) buffers, reads boot clocks and
 * spread-spectrum info, and populates bw_params from the PMFW DPM table.
 */
void dcn35_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_dcn35 *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
	struct dcn351_smu_dpm_clks smu_dpm_clks_dcn351 = { 0 };
	clk_mgr->base.base.ctx = ctx;
	clk_mgr->base.base.funcs = &dcn35_funcs;

	clk_mgr->base.pp_smu = pp_smu;

	clk_mgr->base.dccg = dccg;
	clk_mgr->base.dfs_bypass_disp_clk = 0;

	clk_mgr->base.dprefclk_ss_percentage = 0;
	clk_mgr->base.dprefclk_ss_divider = 1000;
	clk_mgr->base.ss_on_dprefclk = false;
	clk_mgr->base.dfs_ref_freq_khz = 48000;
	/* Only plain DCN3.5 uses the local register maps here; DCN3.51
	 * presumably gets its maps elsewhere — TODO confirm against the
	 * dcn351 construct path. */
	if (ctx->dce_version != DCN_VERSION_3_51) {
		clk_mgr->base.regs = &clk_mgr_regs_dcn35;
		clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35;
		clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35;
	}


	/* GART buffer shared with the SMU for watermark ranges; fall back to a
	 * dummy table (mc_address 0 disables the SMU transfer later). */
	clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
			clk_mgr->base.base.ctx,
			DC_MEM_ALLOC_TYPE_GART,
			sizeof(struct dcn35_watermarks),
			&clk_mgr->smu_wm_set.mc_address.quad_part);

	if (!clk_mgr->smu_wm_set.wm_set) {
		clk_mgr->smu_wm_set.wm_set = &dummy_wms;
		clk_mgr->smu_wm_set.mc_address.quad_part = 0;
	}
	ASSERT(clk_mgr->smu_wm_set.wm_set);

	/* GART buffer for the PMFW DPM clock table dump. */
	smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem(
			clk_mgr->base.base.ctx,
			DC_MEM_ALLOC_TYPE_GART,
			sizeof(DpmClocks_t_dcn35),
			&smu_dpm_clks.mc_address.quad_part);
	if (smu_dpm_clks.dpm_clks == NULL) {
		smu_dpm_clks.dpm_clks = &dummy_clocks;
		smu_dpm_clks.mc_address.quad_part = 0;
	}
	ASSERT(smu_dpm_clks.dpm_clks);

	if (ctx->dce_version == DCN_VERSION_3_51) {
		smu_dpm_clks_dcn351.dpm_clks = (DpmClocks_t_dcn351 *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_GART,
				sizeof(DpmClocks_t_dcn351),
				&smu_dpm_clks_dcn351.mc_address.quad_part);
		if (smu_dpm_clks_dcn351.dpm_clks == NULL) {
			smu_dpm_clks_dcn351.dpm_clks = &dummy_clocks_dcn351;
			smu_dpm_clks_dcn351.mc_address.quad_part = 0;
		}
	}

	clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(&clk_mgr->base);

	if (clk_mgr->base.smu_ver)
		clk_mgr->base.smu_present = true;

	/* TODO: Check we get what we expect during bringup */
	clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);

	if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
		dcn35_bw_params.wm_table = lpddr5_wm_table;
	} else {
		dcn35_bw_params.wm_table = ddr5_wm_table;
	}
	/* Saved clocks configured at boot for debug purposes */
	dcn35_save_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);

	clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
	clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;

	dce_clock_read_ss_info(&clk_mgr->base);
	/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/

	dcn35_read_ss_info_from_lut(&clk_mgr->base);

	clk_mgr->base.base.bw_params = &dcn35_bw_params;

	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
		int i;
		/* dcn351 shares the dcn35 path: fetch its table, then translate. */
		if (ctx->dce_version == DCN_VERSION_3_51) {
			dcn351_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks_dcn351);
			translate_to_DpmClocks_t_dcn35(&smu_dpm_clks_dcn351, &smu_dpm_clks);
		} else
			dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
		DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
				"NumDispClkLevelsEnabled: %d\n"
				"NumSocClkLevelsEnabled: %d\n"
				"VcnClkLevelsEnabled: %d\n"
				"FClkLevelsEnabled: %d\n"
				"NumMemPstatesEnabled: %d\n"
				"MinGfxClk: %d\n"
				"MaxGfxClk: %d\n",
				smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
				smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
				smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
				smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
				smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled,
				smu_dpm_clks.dpm_clks->NumMemPstatesEnabled,
				smu_dpm_clks.dpm_clks->MinGfxClk,
				smu_dpm_clks.dpm_clks->MaxGfxClk);
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
					i,
					smu_dpm_clks.dpm_clks->DcfClocks[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
					i, smu_dpm_clks.dpm_clks->DispClocks[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
					i, smu_dpm_clks.dpm_clks->SocClocks[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Freq[%d] = %d\n",
					i, smu_dpm_clks.dpm_clks->FclkClocks_Freq[i]);
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Voltage[%d] = %d\n",
					i, smu_dpm_clks.dpm_clks->FclkClocks_Voltage[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++)
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
					i, smu_dpm_clks.dpm_clks->SocVoltage[i]);

		for (i = 0; i < smu_dpm_clks.dpm_clks->NumMemPstatesEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks.MemPstateTable[%d].UClk = %d\n"
					"smu_dpm_clks.dpm_clks->MemPstateTable[%d].MemClk= %d\n"
					"smu_dpm_clks.dpm_clks->MemPstateTable[%d].Voltage = %d\n",
					i, smu_dpm_clks.dpm_clks->MemPstateTable[i].UClk,
					i, smu_dpm_clks.dpm_clks->MemPstateTable[i].MemClk,
					i, smu_dpm_clks.dpm_clks->MemPstateTable[i].Voltage);
		}

		if (ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
			dcn35_clk_mgr_helper_populate_bw_params(
					&clk_mgr->base,
					ctx->dc_bios->integrated_info,
					smu_dpm_clks.dpm_clks);
		}
	}

	/* The DPM buffers are only needed during construct; free them now. */
	if
(smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0) 1551 dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART, 1552 smu_dpm_clks.dpm_clks); 1553 1554 if (smu_dpm_clks_dcn351.dpm_clks && smu_dpm_clks_dcn351.mc_address.quad_part != 0) 1555 dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART, 1556 smu_dpm_clks_dcn351.dpm_clks); 1557 1558 if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) { 1559 bool ips_support = false; 1560 1561 /*avoid call pmfw at init*/ 1562 ips_support = dcn35_smu_get_ips_supported(&clk_mgr->base); 1563 if (ips_support) { 1564 ctx->dc->debug.ignore_pg = false; 1565 ctx->dc->debug.disable_dpp_power_gate = false; 1566 ctx->dc->debug.disable_hubp_power_gate = false; 1567 ctx->dc->debug.disable_dsc_power_gate = false; 1568 1569 /* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. */ 1570 if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE && 1571 ctx->dce_version != DCN_VERSION_3_51 && 1572 ((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00)) 1573 ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; 1574 } else { 1575 /*let's reset the config control flag*/ 1576 ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/ 1577 } 1578 } 1579 } 1580 1581 void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int) 1582 { 1583 struct clk_mgr_dcn35 *clk_mgr = TO_CLK_MGR_DCN35(clk_mgr_int); 1584 1585 if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0) 1586 dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER, 1587 clk_mgr->smu_wm_set.wm_set); 1588 } 1589