1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hwseq.h"
33 #include "dcn10/dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10/dcn10_optc.h"
38 #include "dcn10/dcn10_dpp.h"
39 #include "dcn10/dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10/dcn10_hubp.h"
46 #include "dcn10/dcn10_hubbub.h"
47 #include "dcn10/dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dio/dcn10/dcn10_dio.h"
54 #include "dce/dmub_psr.h"
55 #include "dc_dmub_srv.h"
56 #include "dce/dmub_hw_lock_mgr.h"
57 #include "dc_trace.h"
58 #include "dce/dmub_outbox.h"
59 #include "link_service.h"
60 #include "dc_state_priv.h"
61
62 #define DC_LOGGER \
63 dc_logger
64 #define DC_LOGGER_INIT(logger) \
65 struct dal_logger *dc_logger = logger
66
67 #define CTX \
68 hws->ctx
69 #define REG(reg)\
70 hws->regs->reg
71
72 #undef FN
73 #define FN(reg_name, field_name) \
74 hws->shifts->field_name, hws->masks->field_name
75
76 /*print is 17 wide, first two characters are spaces*/
77 #define DTN_INFO_MICRO_SEC(ref_cycle) \
78 print_microsec(dc_ctx, log_ctx, ref_cycle)
79
80 #define GAMMA_HW_POINTS_NUM 256
81
82 #define PGFSM_POWER_ON 0
83 #define PGFSM_POWER_OFF 2
84
/*
 * Convert a DCHUB reference-clock cycle count to microseconds and emit it
 * to the DTN log as a fixed-width "<us>.<frac>" field (17 characters wide,
 * first two characters are spaces).
 */
static void print_microsec(struct dc_context *dc_ctx,
	struct dc_log_buffer_ctx *log_ctx,
	uint32_t ref_cycle)
{
	static const unsigned int scale = 1000;
	const uint32_t ref_clk_mhz =
		dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	uint32_t scaled_us = (ref_cycle * scale) / ref_clk_mhz;

	DTN_INFO("  %11d.%03d", scaled_us / scale, scaled_us % scale);
}
97
98 /*
99 * Delay until we passed busy-until-point to which we can
100 * do necessary locking/programming on consecutive full updates
101 */
void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
{
	struct crtc_position position;
	struct dc_stream_state *stream = pipe_ctx->stream;
	unsigned int vpos, frame_count;
	uint32_t vupdate_start, vupdate_end, vblank_start;
	unsigned int lines_to_vupdate, us_to_vupdate;
	unsigned int us_per_line, us_vupdate;

	/* Nothing to wait on without a stream, TG and stream encoder. */
	if (!pipe_ctx->stream ||
		!pipe_ctx->stream_res.tg ||
		!pipe_ctx->stream_res.stream_enc)
		return;

	/* Skip non-first ODM pipes; presumably only the ODM master pipe
	 * needs to wait — TODO confirm against the unlock path.
	 */
	if (pipe_ctx->prev_odm_pipe &&
		pipe_ctx->stream)
		return;

	/* Only wait if a prior unlock/programming flagged this pipe busy. */
	if (!pipe_ctx->wait_is_required)
		return;

	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	/* A disabled TG has no VUPDATE to wait for. */
	if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
		&vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	frame_count = tg->funcs->get_frame_count(tg);

	/* The flagged update has long since latched (more than two frames
	 * ago); nothing left to wait for.
	 */
	if (frame_count - pipe_ctx->wait_frame_count > 2)
		return;

	vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;

	/* Lines remaining until VUPDATE, accounting for wrap past v_total
	 * when the current position is already at/after vupdate_start.
	 */
	if (vpos >= vupdate_start && vupdate_start >= vblank_start)
		lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
	else
		lines_to_vupdate = vupdate_start - vpos;

	/* h_total [pix] * 10000 / pix_clk [100Hz units] = line time in us. */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* Normalize vupdate_end so the window width computes correctly
	 * when the window wraps past the end of the frame.
	 */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;

	/* Already inside the VUPDATE window: no need to wait to reach it. */
	if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
		us_to_vupdate = 0;

	/* Duration of the VUPDATE window itself. */
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;

	if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
		//surface updates come in at high irql
		pipe_ctx->wait_is_required = true;
		return;
	}

	/* Sleep through the time to VUPDATE plus the window itself. */
	fsleep(us_to_vupdate + us_vupdate);

	//clear
	pipe_ctx->next_vupdate = 0;
	pipe_ctx->wait_frame_count = 0;
	pipe_ctx->wait_is_required = false;
}
171
172 /*
173 * On pipe unlock and programming, indicate pipe will be busy
174 * until some frame and line (vupdate), this is required for consecutive
175 * full updates, need to wait for updates
176 * to latch to try and program the next update
177 */
void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	uint32_t vupdate_start, vupdate_end;
	struct crtc_position position;
	unsigned int vpos, cur_frame;

	/* Need a stream, TG and stream encoder for an update to latch. */
	if (!pipe_ctx->stream ||
		!pipe_ctx->stream_res.tg ||
		!pipe_ctx->stream_res.stream_enc)
		return;

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
		&vupdate_end);

	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	struct optc *optc1 = DCN10TG_FROM_TG(tg);

	/* max_frame_count is used below to wrap the expected frame number;
	 * zero would make the wrap arithmetic meaningless.
	 */
	ASSERT(optc1->max_frame_count != 0);

	/* No VUPDATE will occur on a disabled TG. */
	if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
		return;

	pipe_ctx->next_vupdate = vupdate_start;

	cur_frame = tg->funcs->get_frame_count(tg);

	if (vpos < vupdate_start) {
		/* VUPDATE still lies ahead in the current frame. */
		pipe_ctx->wait_frame_count = cur_frame;
	} else {
		/* Already past VUPDATE: the update latches next frame.
		 * Wrap the expected frame number at the hardware frame
		 * counter's maximum.
		 */
		if (cur_frame + 1 > optc1->max_frame_count)
			pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
		else
			pipe_ctx->wait_frame_count = cur_frame + 1;
	}

	/* Tell dcn10_wait_for_pipe_update_if_needed() there is work pending. */
	pipe_ctx->wait_is_required = true;
}
219
/*
 * dcn10_lock_all_pipes - (Un)lock the timing generator of every active
 * top pipe in @context.
 *
 * @dc: dc instance
 * @context: dc_state whose pipes are walked
 * @lock: true to lock, false to unlock
 *
 * Only the top pipe of each blending tree is touched to prevent redundant
 * (un)locking; pipes that are disabled, have no plane in either the new
 * or the current state, have a disabled TG, or are SubVP phantom pipes
 * are skipped.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct pipe_ctx *old_pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
			!pipe_ctx->stream ||
			(!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
			!tg->funcs->is_tg_enabled(tg) ||
			dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
			continue;

		/* Pass @lock straight through; branching on it only
		 * duplicated this exact call.
		 */
		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
	}
}
251
/*
 * Dump the MPC and DPP CRC result registers to the DTN log, when the
 * registers exist on this ASIC (a zero register offset from REG() is
 * treated as "not present").
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
265
dcn10_log_hubbub_state(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)266 static void dcn10_log_hubbub_state(struct dc *dc,
267 struct dc_log_buffer_ctx *log_ctx)
268 {
269 struct dc_context *dc_ctx = dc->ctx;
270 struct dcn_hubbub_wm wm;
271 int i;
272
273 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
274 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
275
276 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
277 " sr_enter sr_exit dram_clk_change\n");
278
279 for (i = 0; i < 4; i++) {
280 struct dcn_hubbub_wm_set *s;
281
282 s = &wm.sets[i];
283 DTN_INFO("WM_Set[%d]:", s->wm_set);
284 DTN_INFO_MICRO_SEC(s->data_urgent);
285 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
286 DTN_INFO_MICRO_SEC(s->sr_enter);
287 DTN_INFO_MICRO_SEC(s->sr_exit);
288 DTN_INFO_MICRO_SEC(s->dram_clk_change);
289 DTN_INFO("\n");
290 }
291
292 DTN_INFO("\n");
293 }
294
dcn10_log_hubp_states(struct dc * dc,void * log_ctx)295 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
296 {
297 struct dc_context *dc_ctx = dc->ctx;
298 struct resource_pool *pool = dc->res_pool;
299 int i;
300
301 DTN_INFO(
302 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
303 for (i = 0; i < pool->pipe_count; i++) {
304 struct hubp *hubp = pool->hubps[i];
305 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
306
307 hubp->funcs->hubp_read_state(hubp);
308
309 if (!s->blank_en) {
310 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
311 hubp->inst,
312 s->pixel_format,
313 s->inuse_addr_hi,
314 s->viewport_width,
315 s->viewport_height,
316 s->rotation_angle,
317 s->h_mirror_en,
318 s->sw_mode,
319 s->dcc_en,
320 s->blank_en,
321 s->clock_en,
322 s->ttu_disable,
323 s->underflow_status);
324 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
325 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
326 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
327 DTN_INFO("\n");
328 }
329 }
330
331 DTN_INFO("\n=======HUBP FL======\n");
332 static const char * const pLabels[] = {
333 "inst", "Enabled ", "Done ", "adr_mode ", "width ", "mpc_width ",
334 "tmz", "xbar_sel_R", "xbar_sel_G", "xbar_sel_B", "adr_hi ",
335 "adr_low", "REFCYC", "Bias", "Scale", "Mode",
336 "Format", "prefetch"};
337
338 for (i = 0; i < pool->pipe_count; i++) {
339 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
340 struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
341 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
342
343 if (!s->blank_en) {
344 uint32_t values[] = {
345 pool->hubps[i]->inst,
346 fl_regs->lut_enable,
347 fl_regs->lut_done,
348 fl_regs->lut_addr_mode,
349 fl_regs->lut_width,
350 fl_regs->lut_mpc_width,
351 fl_regs->lut_tmz,
352 fl_regs->lut_crossbar_sel_r,
353 fl_regs->lut_crossbar_sel_g,
354 fl_regs->lut_crossbar_sel_b,
355 fl_regs->lut_addr_hi,
356 fl_regs->lut_addr_lo,
357 fl_regs->refcyc_3dlut_group,
358 fl_regs->lut_fl_bias,
359 fl_regs->lut_fl_scale,
360 fl_regs->lut_fl_mode,
361 fl_regs->lut_fl_format,
362 dlg_regs->dst_y_prefetch};
363
364 int num_elements = 18;
365
366 for (int j = 0; j < num_elements; j++)
367 DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
368 }
369 }
370
371 DTN_INFO("\n=========RQ========\n");
372 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
373 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
374 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
375 for (i = 0; i < pool->pipe_count; i++) {
376 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
377 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
378
379 if (!s->blank_en)
380 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
381 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
382 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
383 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
384 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
385 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
386 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
387 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
388 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
389 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
390 }
391
392 DTN_INFO("========DLG========\n");
393 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
394 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
395 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
396 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
397 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
398 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
399 " x_rp_dlay x_rr_sfl rc_td_grp\n");
400
401 for (i = 0; i < pool->pipe_count; i++) {
402 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
403 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
404
405 if (!s->blank_en)
406 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
407 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
408 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %xh\n",
409 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
410 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
411 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
412 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
413 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
414 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
415 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
416 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
417 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
418 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
419 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
420 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
421 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
422 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
423 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
424 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
425 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
426 dlg_regs->xfc_reg_remote_surface_flip_latency, dlg_regs->refcyc_per_tdlut_group);
427 }
428
429 DTN_INFO("========TTU========\n");
430 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
431 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
432 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
433 for (i = 0; i < pool->pipe_count; i++) {
434 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
435 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
436
437 if (!s->blank_en)
438 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
439 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
440 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
441 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
442 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
443 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
444 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
445 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
446 }
447 DTN_INFO("\n");
448 }
449
dcn10_log_color_state(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)450 static void dcn10_log_color_state(struct dc *dc,
451 struct dc_log_buffer_ctx *log_ctx)
452 {
453 struct dc_context *dc_ctx = dc->ctx;
454 struct resource_pool *pool = dc->res_pool;
455 bool is_gamut_remap_available = false;
456 int i;
457
458 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
459 " GAMUT adjust "
460 "C11 C12 C13 C14 "
461 "C21 C22 C23 C24 "
462 "C31 C32 C33 C34 \n");
463 for (i = 0; i < pool->pipe_count; i++) {
464 struct dpp *dpp = pool->dpps[i];
465 struct dcn_dpp_state s = {0};
466
467 dpp->funcs->dpp_read_state(dpp, &s);
468 if (dpp->funcs->dpp_get_gamut_remap) {
469 dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
470 is_gamut_remap_available = true;
471 }
472
473 if (!s.is_enabled)
474 continue;
475
476 DTN_INFO("[%2d]: %11xh %11s %9s %9s",
477 dpp->inst,
478 s.igam_input_format,
479 (s.igam_lut_mode == 0) ? "BypassFixed" :
480 ((s.igam_lut_mode == 1) ? "BypassFloat" :
481 ((s.igam_lut_mode == 2) ? "RAM" :
482 ((s.igam_lut_mode == 3) ? "RAM" :
483 "Unknown"))),
484 (s.dgam_lut_mode == 0) ? "Bypass" :
485 ((s.dgam_lut_mode == 1) ? "sRGB" :
486 ((s.dgam_lut_mode == 2) ? "Ycc" :
487 ((s.dgam_lut_mode == 3) ? "RAM" :
488 ((s.dgam_lut_mode == 4) ? "RAM" :
489 "Unknown")))),
490 (s.rgam_lut_mode == 0) ? "Bypass" :
491 ((s.rgam_lut_mode == 1) ? "sRGB" :
492 ((s.rgam_lut_mode == 2) ? "Ycc" :
493 ((s.rgam_lut_mode == 3) ? "RAM" :
494 ((s.rgam_lut_mode == 4) ? "RAM" :
495 "Unknown")))));
496 if (is_gamut_remap_available)
497 DTN_INFO(" %12s "
498 "%010lld %010lld %010lld %010lld "
499 "%010lld %010lld %010lld %010lld "
500 "%010lld %010lld %010lld %010lld",
501 (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
502 ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"),
503 s.gamut_remap.temperature_matrix[0].value,
504 s.gamut_remap.temperature_matrix[1].value,
505 s.gamut_remap.temperature_matrix[2].value,
506 s.gamut_remap.temperature_matrix[3].value,
507 s.gamut_remap.temperature_matrix[4].value,
508 s.gamut_remap.temperature_matrix[5].value,
509 s.gamut_remap.temperature_matrix[6].value,
510 s.gamut_remap.temperature_matrix[7].value,
511 s.gamut_remap.temperature_matrix[8].value,
512 s.gamut_remap.temperature_matrix[9].value,
513 s.gamut_remap.temperature_matrix[10].value,
514 s.gamut_remap.temperature_matrix[11].value);
515
516 DTN_INFO("\n");
517 }
518 DTN_INFO("\n");
519 DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
520 " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
521 " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
522 " blnd_lut:%d oscs:%d\n\n",
523 dc->caps.color.dpp.input_lut_shared,
524 dc->caps.color.dpp.icsc,
525 dc->caps.color.dpp.dgam_ram,
526 dc->caps.color.dpp.dgam_rom_caps.srgb,
527 dc->caps.color.dpp.dgam_rom_caps.bt2020,
528 dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
529 dc->caps.color.dpp.dgam_rom_caps.pq,
530 dc->caps.color.dpp.dgam_rom_caps.hlg,
531 dc->caps.color.dpp.post_csc,
532 dc->caps.color.dpp.gamma_corr,
533 dc->caps.color.dpp.dgam_rom_for_yuv,
534 dc->caps.color.dpp.hw_3d_lut,
535 dc->caps.color.dpp.ogam_ram,
536 dc->caps.color.dpp.ocsc);
537
538 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
539 for (i = 0; i < pool->mpcc_count; i++) {
540 struct mpcc_state s = {0};
541
542 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
543 if (s.opp_id != 0xf)
544 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
545 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
546 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
547 s.idle);
548 }
549 DTN_INFO("\n");
550 DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
551 dc->caps.color.mpc.gamut_remap,
552 dc->caps.color.mpc.num_3dluts,
553 dc->caps.color.mpc.ogam_ram,
554 dc->caps.color.mpc.ocsc);
555 DTN_INFO("===== MPC RMCM 3DLUT =====\n");
556 static const char * const pLabels[] = {
557 "MPCC", "SIZE", "MODE", "MODE_CUR", "RD_SEL",
558 "30BIT_EN", "WR_EN_MASK", "RAM_SEL", "OUT_NORM_FACTOR", "FL_SEL",
559 "OUT_OFFSET", "OUT_SCALE", "FL_DONE", "SOFT_UNDERFLOW", "HARD_UNDERFLOW",
560 "MEM_PWR_ST", "FORCE", "DIS", "MODE"};
561
562 for (i = 0; i < pool->mpcc_count; i++) {
563 struct mpcc_state s = {0};
564
565 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
566 if (s.opp_id != 0xf) {
567 uint32_t values[] = {
568 i,
569 s.rmcm_regs.rmcm_3dlut_size,
570 s.rmcm_regs.rmcm_3dlut_mode,
571 s.rmcm_regs.rmcm_3dlut_mode_cur,
572 s.rmcm_regs.rmcm_3dlut_read_sel,
573 s.rmcm_regs.rmcm_3dlut_30bit_en,
574 s.rmcm_regs.rmcm_3dlut_wr_en_mask,
575 s.rmcm_regs.rmcm_3dlut_ram_sel,
576 s.rmcm_regs.rmcm_3dlut_out_norm_factor,
577 s.rmcm_regs.rmcm_3dlut_fl_sel,
578 s.rmcm_regs.rmcm_3dlut_out_offset_r,
579 s.rmcm_regs.rmcm_3dlut_out_scale_r,
580 s.rmcm_regs.rmcm_3dlut_fl_done,
581 s.rmcm_regs.rmcm_3dlut_fl_soft_underflow,
582 s.rmcm_regs.rmcm_3dlut_fl_hard_underflow,
583 s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
584 s.rmcm_regs.rmcm_3dlut_mem_pwr_force,
585 s.rmcm_regs.rmcm_3dlut_mem_pwr_dis,
586 s.rmcm_regs.rmcm_3dlut_mem_pwr_mode};
587
588 int num_elements = 19;
589
590 for (int j = 0; j < num_elements; j++)
591 DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
592 }
593 }
594 DTN_INFO("\n");
595 DTN_INFO("===== MPC RMCM Shaper =====\n");
596 DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n");
597 for (i = 0; i < pool->mpcc_count; i++) {
598 struct mpcc_state s = {0};
599
600 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
601 if (s.opp_id != 0xf)
602 DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n",
603 i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur,
604 s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b,
605 s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b,
606 s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state,
607 s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode);
608 }
609 }
610
/*
 * dcn10_log_hw_state - Dump the full display hardware state to the DTN log.
 *
 * @dc: dc instance
 * @log_ctx: DTN log buffer context
 *
 * Aggregates HUBBUB watermarks, HUBP state, color pipeline state, OTG
 * timings, DSC, stream/link encoder state (including DP HPO), the
 * calculated clocks, and MPC CRCs into a single DTN report.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* Prefer the ASIC-specific color-state dump when one is hooked up. */
	if (dc->hwss.log_color_state)
		dc->hwss.log_color_state(dc, log_ctx);
	else
		dcn10_log_color_state(dc, log_ctx);

	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		if (tg->funcs->read_otg_state)
			tg->funcs->read_otg_state(tg, &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
			tg->inst,
			s.v_blank_start,
			s.v_blank_end,
			s.v_sync_a_start,
			s.v_sync_a_end,
			s.v_sync_a_pol,
			s.v_total_max,
			s.v_total_min,
			s.v_total_max_sel,
			s.v_total_min_sel,
			s.h_blank_start,
			s.h_blank_end,
			s.h_sync_a_start,
			s.h_sync_a_end,
			s.h_sync_a_pol,
			s.h_total,
			s.v_total,
			s.underflow_occurred_status,
			s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		/* Readback hook is optional per stream encoder generation. */
		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		/* link_enc may be NULL (e.g. unassigned with DP flexible
		 * encoder mapping); readback hook is also optional.
		 */
		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
		dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
		dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	{
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
						hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
						hpo_dp_se_state.stream_enc_enabled,
						hpo_dp_se_state.otg_inst,
						(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
							((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
							(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
						(hpo_dp_se_state.component_depth == 0) ? 6 :
							((hpo_dp_se_state.component_depth == 1) ? 8 :
							(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
						hpo_dp_se_state.vid_stream_enabled,
						hpo_dp_se_state.sdp_enabled,
						hpo_dp_se_state.compressed_format,
						hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
						hpo_dp_link_enc->inst,
						hpo_dp_le_state.link_enc_enabled,
						(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
							(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
							(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
						hpo_dp_le_state.lane_count,
						hpo_dp_le_state.stream_src[0],
						hpo_dp_le_state.slot_count[0],
						hpo_dp_le_state.vc_rate_x[0],
						hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
813
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)814 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
815 {
816 struct hubp *hubp = pipe_ctx->plane_res.hubp;
817 struct timing_generator *tg = pipe_ctx->stream_res.tg;
818
819 if (tg->funcs->is_optc_underflow_occurred(tg)) {
820 tg->funcs->clear_optc_underflow(tg);
821 return true;
822 }
823
824 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
825 hubp->funcs->hubp_clear_underflow(hubp);
826 return true;
827 }
828 return false;
829 }
830
/*
 * dcn10_enable_power_gating_plane - Allow or forbid plane power gating.
 *
 * @hws: hardware sequencer context
 * @enable: true to let the PGFSM gate the plane domains; false to force
 *	them to stay powered on (power gating disabled)
 *
 * Sets/clears POWER_FORCEON on the four DCHUBP domains (0/2/4/6) and the
 * four DPP domains (1/3/5/7).
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	/* Forcing a domain on is what disables its power gating. */
	bool force_on = !enable;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
852
/*
 * dcn10_disable_vga - Take all four display controllers out of VGA mode.
 *
 * @hws: hardware sequencer context
 *
 * No-op if none of D1-D4 is in VGA mode; otherwise clears all four VGA
 * controls and kicks the VGA test render so DCHUBP timing updates
 * correctly (see HW note below).
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* Nothing to do if no controller is currently in VGA mode. */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
885
886 /**
887 * dcn10_dpp_pg_control - DPP power gate control.
888 *
889 * @hws: dce_hwseq reference.
890 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP domain on (un-gate), false to power gate it.
892 *
893 * Enable or disable power gate in the specific DPP instance.
894 */
void dcn10_dpp_pg_control(
	struct dce_hwseq *hws,
	unsigned int dpp_inst,
	bool power_on)
{
	/* Register semantics are inverted vs. power_on:
	 * POWER_GATE = 1 gates (powers down) the domain.
	 */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	/* No DOMAIN1 register mapped on this ASIC -> no DPP power gating. */
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	/* DPP instance N maps to the odd-numbered domain 2N+1. After each
	 * request, poll the PGFSM status until it reports the target state.
	 */
	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
946
947 /**
948 * dcn10_hubp_pg_control - HUBP power gate control.
949 *
950 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP domain on (un-gate), false to power gate it.
953 *
954 * Enable or disable power gate in the specific HUBP instance.
955 */
void dcn10_hubp_pg_control(
	struct dce_hwseq *hws,
	unsigned int hubp_inst,
	bool power_on)
{
	/* Register semantics are inverted vs. power_on:
	 * POWER_GATE = 1 gates (powers down) the domain.
	 */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	/* No DOMAIN0 register mapped on this ASIC -> no HUBP power gating. */
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	/* HUBP instance N maps to the even-numbered domain 2N. After each
	 * request, poll the PGFSM status until it reports the target state.
	 */
	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
1007
/* Power up the front-end resources (DPP + HUBP domains) for @plane_id and
 * enable its DPP root clock so the pipe can be programmed.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	/* Enable the DPP root clock first, where the ASIC provides a hook. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Power-gate requests only take effect while IP_REQUEST_EN
		 * is asserted; open the request window ...
		 */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		/* ... and close it again once both domains are ungated. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
1033
/* Revert the DEGVIDCN10_253 stutter workaround: re-blank HUBP0 and gate
 * its power domain again. No-op unless apply_DEGVIDCN10_253_wa() marked
 * the workaround as applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* Power-gate requests only take effect while IP_REQUEST_EN is set. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	/* false -> gate HUBP0's power domain again */
	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
1053
/* DEGVIDCN10_253 workaround: when every HUBP is power gated, power HUBP0
 * back on (unblanked) so stutter mode can still engage. Skipped when
 * stutter is disabled via debug flag or the WA itself is not enabled.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* Only applicable when every pipe's HUBP is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	/* Power-gate requests only take effect while IP_REQUEST_EN is set. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	/* true -> ungate HUBP0's power domain */
	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
1083
/* Run the VBIOS golden init sequence: global DCN init plus a per-pipe
 * disable via the BIOS command table, then restore the self-refresh
 * force-enable state if the command table changed it.
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	/* ASIC-specific S0i3 hook may handle golden init entirely. */
	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* Snapshot the self-refresh force-enable state before the command
	 * table runs, so it can be restored below if the table flipped it.
	 */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
			dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
1123
/* Workaround for spurious OPTC underflow reports: wait out pending MPCC
 * disconnects for this stream, enable blank-data double buffering, then
 * clear any underflow bit that appeared during this sequence — but only
 * if it was not already set on entry (a pre-existing underflow is real
 * and must stay visible).
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* Remember whether an underflow was already latched on entry. */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* Clear only underflows newly latched during this sequence. */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
1152
calculate_vready_offset_for_group(struct pipe_ctx * pipe)1153 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
1154 {
1155 struct pipe_ctx *other_pipe;
1156 int vready_offset = pipe->pipe_dlg_param.vready_offset;
1157
1158 /* Always use the largest vready_offset of all connected pipes */
1159 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
1160 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
1161 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
1162 }
1163 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
1164 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
1165 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
1166 }
1167 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
1168 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
1169 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
1170 }
1171 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
1172 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
1173 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
1174 }
1175
1176 return vready_offset;
1177 }
1178
/* Bring up OTG timing for the stream on this pipe: enable the OPTC clock,
 * program the pixel clock PLL, program OTG timing and blank color, then
 * enable the CRTC. Children pipes share the parent's back end, so only
 * the top pipe programs anything.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if the PLL or CRTC
 * fails to program/enable.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	/* Program the pixel clock; failure here means the PLL could not be
	 * set for this timing.
	 */
	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Track symclk state for HDMI TMDS so PHY power sequencing knows
	 * the OTG is consuming it.
	 */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->pipe_dlg_param.pstate_keepout,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG before enabling the CRTC so no garbage scans out;
	 * also run the false-underflow WA while blanked.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1281
/* Tear down the back end (link DPMS, audio, OTG) for @pipe_ctx and clear
 * its stream pointer. Only the parent (top) pipe disables the shared OTG.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* No stream encoder -> back end was never set up for this pipe. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already be disabled, or the dpms_off status may be
	 * incorrect due to the fastboot feature: when the system resumes
	 * from S4 with second screen only, dpms_off would be true but
	 * VBIOS lit up eDP — so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
		/* Drop the symclk OTG refcount taken in enable_stream_timing. */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer if this pipe_ctx belongs to the
	 * current state.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1349
dcn10_hw_wa_force_recovery(struct dc * dc)1350 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1351 {
1352 struct hubp *hubp ;
1353 unsigned int i;
1354
1355 if (!dc->debug.recovery_enabled)
1356 return false;
1357 /*
1358 DCHUBP_CNTL:HUBP_BLANK_EN=1
1359 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1360 DCHUBP_CNTL:HUBP_DISABLE=1
1361 DCHUBP_CNTL:HUBP_DISABLE=0
1362 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1363 DCSURF_PRIMARY_SURFACE_ADDRESS
1364 DCHUBP_CNTL:HUBP_BLANK_EN=0
1365 */
1366
1367 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1368 struct pipe_ctx *pipe_ctx =
1369 &dc->current_state->res_ctx.pipe_ctx[i];
1370 if (pipe_ctx != NULL) {
1371 hubp = pipe_ctx->plane_res.hubp;
1372 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1373 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1374 hubp->funcs->set_hubp_blank_en(hubp, true);
1375 }
1376 }
1377 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1378 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1379
1380 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1381 struct pipe_ctx *pipe_ctx =
1382 &dc->current_state->res_ctx.pipe_ctx[i];
1383 if (pipe_ctx != NULL) {
1384 hubp = pipe_ctx->plane_res.hubp;
1385 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1386 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1387 hubp->funcs->hubp_disable_control(hubp, true);
1388 }
1389 }
1390 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1391 struct pipe_ctx *pipe_ctx =
1392 &dc->current_state->res_ctx.pipe_ctx[i];
1393 if (pipe_ctx != NULL) {
1394 hubp = pipe_ctx->plane_res.hubp;
1395 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1396 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1397 hubp->funcs->hubp_disable_control(hubp, true);
1398 }
1399 }
1400 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1401 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1402 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1403 struct pipe_ctx *pipe_ctx =
1404 &dc->current_state->res_ctx.pipe_ctx[i];
1405 if (pipe_ctx != NULL) {
1406 hubp = pipe_ctx->plane_res.hubp;
1407 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1408 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1409 hubp->funcs->set_hubp_blank_en(hubp, true);
1410 }
1411 }
1412 return true;
1413
1414 }
1415
/* Sanity check that HUBBUB still allows p-state change. On failure,
 * optionally log HW state, break to debugger, attempt the force-recovery
 * workaround, and re-verify.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		/* Try the soft-reset recovery, then check once more. */
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1439
1440 /* trigger HW to start disconnect plane from stream on the next vsync */
/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc,
				   struct dc_state *state,
				   struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	/* Find this DPP's MPCC in the OPP's blending tree. */
	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	/* Flag that optimized programming must run before the next flip. */
	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1474
1475 /**
1476 * dcn10_plane_atomic_power_down - Power down plane components.
1477 *
1478 * @dc: dc struct reference. used for grab hwseq.
1479 * @dpp: dpp struct reference.
1480 * @hubp: hubp struct reference.
1481 *
1482 * Keep in mind that this operation requires a power gate configuration;
1483 * however, requests for switch power gate are precisely controlled to avoid
1484 * problems. For this reason, power gate request is usually disabled. This
1485 * function first needs to enable the power gate request before disabling DPP
1486 * and HUBP. Finally, it disables the power gate request again.
1487 */
void dcn10_plane_atomic_power_down(struct dc *dc,
				   struct dpp *dpp,
				   struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the power-gate request window (gating requests only
		 * take effect while IP_REQUEST_EN is set).
		 */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		/* false -> gate the DPP and HUBP power domains */
		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		/* Close the request window again. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}

	/* Finally stop the DPP root clock, where a hook exists. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
1517
1518 /* disable HW used by plane.
1519 * note: cannot disable until disconnect is complete
1520 */
/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* MPCC disconnect must have completed before clocks are stopped. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Stop the OPP pipe clock only if no MPCC is still attached to it. */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	/* Gate the DPP/HUBP power domains for this plane. */
	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Detach the pipe_ctx from its stream/plane resources. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1553
/* Disable the front end for @pipe_ctx (no-op if it has no HUBP or is
 * already power gated), then apply the DEGVIDCN10_253 stutter workaround
 * in case this was the last active pipe.
 */
void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	apply_DEGVIDCN10_253_wa(dc);

	DC_LOG_DC("Power down front end %d\n",
					pipe_ctx->pipe_idx);
}
1569
/* Bring every pipe to a known-good state at init: blank enabled OTGs,
 * reset DET allocation and the MPC tree, disconnect and power down every
 * front end not carrying a seamless-boot stream, then power gate unused
 * DSCs. Pipes taking part in seamless boot are left untouched.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;
	bool tg_enabled[MAX_PIPES] = {false};

	/* Any stream flagged for seamless boot keeps its pipes alive. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
			if (hubbub->funcs->program_det_segments)
				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			tg_enabled[i] = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		/* Wire the default resources into the pipe_ctx so the
		 * disconnect/disable sequence below operates on them.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Clean up MPC tree */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (tg_enabled[i]) {
			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;

					/* Drop the tree's bottom link if it points at a
					 * disabled pipe.
					 */
					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
				}
			}
		}
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obatin real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s  = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			/* Keep a DSC powered if it feeds the active OPTC. */
			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1759
/*
 * dcn10_init_hw - One-time display hardware bring-up.
 *
 * Initializes clocks and the DCCG, applies BIOS golden settings, derives
 * the DCCG/DCHUB reference clocks from the crystal frequency, powers up
 * link encoders, audio, backlight/ABM/DMCU, and finally enables DCN clock
 * gating.  When a seamless (firmware-lit) boot is requested, pipe
 * power-down is skipped so the already-lit display is not blanked.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			/* Derive the DCCG and DCHUB reference clocks from the
			 * crystal frequency reported by the VBIOS firmware info.
			 */
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			/* Capture the panel's restored backlight levels so ABM
			 * starts from what the panel controller reports.
			 */
			if (link->panel_cntl) {
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
				user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
			}
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight, user_level);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
			dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating)
			dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1899
1900 /* In headless boot cases, DIG may be turned
1901 * on which causes HW/SW discrepancies.
1902 * To avoid this, power down hardware on boot
1903 * if DIG is turned on
1904 */
void dcn10_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	/* Prefer the full eDP shutdown sequence (backlight off, HW power
	 * down, panel power off) when the first eDP link has an enabled DIG
	 * and every required hook is present.
	 */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwseq->funcs.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwseq->funcs.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		/* Otherwise power down hardware once if any link's DIG is on. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwseq->funcs.power_down) {
				dc->hwseq->funcs.power_down(dc);
				break;
			}

		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}
1946
/*
 * Tear down back ends whose stream is gone or must be fully reprogrammed
 * in the new context, then power down their clock sources.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Only the top pipe of a tree owns a back end. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1978
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1979 static bool patch_address_for_sbs_tb_stereo(
1980 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1981 {
1982 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1983 bool sec_split = pipe_ctx->top_pipe &&
1984 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1985 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1986 (pipe_ctx->stream->timing.timing_3d_format ==
1987 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1988 pipe_ctx->stream->timing.timing_3d_format ==
1989 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1990 *addr = plane_state->address.grph_stereo.left_addr;
1991 plane_state->address.grph_stereo.left_addr =
1992 plane_state->address.grph_stereo.right_addr;
1993 return true;
1994 } else {
1995 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1996 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1997 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1998 plane_state->address.grph_stereo.right_addr =
1999 plane_state->address.grph_stereo.left_addr;
2000 plane_state->address.grph_stereo.right_meta_addr =
2001 plane_state->address.grph_stereo.left_meta_addr;
2002 }
2003 }
2004 return false;
2005 }
2006
/*
 * Program the plane's surface address into the HUBP, handling the
 * temporary SBS/TAB stereo address swap around the flip programming.
 */
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)
		return;

	/* May swap left/right eye addresses; saves the original left
	 * address into addr when it does (returns true).
	 */
	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	/* Immediate flips take effect without waiting for the update point. */
	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	/* Undo the stereo patch applied above. */
	if (addr_patched)
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
2031
dcn10_set_input_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)2032 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
2033 const struct dc_plane_state *plane_state)
2034 {
2035 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
2036 const struct dc_transfer_func *tf = NULL;
2037 bool result = true;
2038
2039 if (dpp_base == NULL)
2040 return false;
2041
2042 tf = &plane_state->in_transfer_func;
2043
2044 if (!dpp_base->ctx->dc->debug.always_use_regamma
2045 && !plane_state->gamma_correction.is_identity
2046 && dce_use_lut(plane_state->format))
2047 dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction);
2048
2049 if (tf->type == TF_TYPE_PREDEFINED) {
2050 switch (tf->tf) {
2051 case TRANSFER_FUNCTION_SRGB:
2052 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
2053 break;
2054 case TRANSFER_FUNCTION_BT709:
2055 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
2056 break;
2057 case TRANSFER_FUNCTION_LINEAR:
2058 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
2059 break;
2060 case TRANSFER_FUNCTION_PQ:
2061 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
2062 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
2063 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
2064 result = true;
2065 break;
2066 default:
2067 result = false;
2068 break;
2069 }
2070 } else if (tf->type == TF_TYPE_BYPASS) {
2071 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
2072 } else {
2073 cm_helper_translate_curve_to_degamma_hw_format(tf,
2074 &dpp_base->degamma_params);
2075 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
2076 &dpp_base->degamma_params);
2077 result = true;
2078 }
2079
2080 return result;
2081 }
2082
2083 #define MAX_NUM_HW_POINTS 0x200
2084
/*
 * Dump a transfer function's point values to the gamma log channels.
 * Note: ctx is currently unused; the DC_LOG_* macros carry their own
 * logging context.
 */
static void log_tf(struct dc_context *ctx,
		const struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
	int i = 0;

	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* Hardware points: red on the default channel, green/blue only on
	 * the all-channels log.
	 */
	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}

	/* Remaining (non-hardware) points only on the verbose channels. */
	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}
}
2109
/*
 * Program the DPP output (regamma) transfer function for a stream.
 * Returns false only when the stream or DPP is missing.
 */
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	if (!stream)
		return false;

	if (dpp == NULL)
		return false;

	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	/* Predefined sRGB maps to the fixed-function hardware curve. */
	if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
			stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);

	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
	 * update.
	 */
	else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
			&stream->out_transfer_func,
			&dpp->regamma_params, false)) {
		dpp->funcs->dpp_program_regamma_pwl(
				dpp,
				&dpp->regamma_params, OPP_REGAMMA_USER);
	} else
		/* Translation failed: fall back to bypass. */
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);

	if (stream->ctx) {
		/* Dump the programmed curve when gamma logging is enabled. */
		log_tf(stream->ctx,
		       &stream->out_transfer_func,
		       dpp->regamma_params.hw_points_num);
	}

	return true;
}
2147
/*
 * Take or release the TG master update lock for a pipe's tree.
 *
 * The master update lock covers everything on the timing generator, so
 * only the top pipe of the tree needs to (un)lock; calls on secondary
 * pipes are no-ops.
 */
void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct timing_generator *tg;

	if (!pipe || pipe->top_pipe)
		return;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	tg = pipe->stream_res.tg;
	if (lock)
		tg->funcs->lock(tg);
	else
		tg->funcs->unlock(tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
2172
2173 /**
2174 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
2175 *
2176 * Software keepout workaround to prevent cursor update locking from stalling
2177 * out cursor updates indefinitely or from old values from being retained in
2178 * the case where the viewport changes in the same frame as the cursor.
2179 *
2180 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
2181 * too close to VUPDATE, then stall out until VUPDATE finishes.
2182 *
2183 * TODO: Optimize cursor programming to be once per frame before VUPDATE
2184 * to avoid the need for this workaround.
2185 *
2186 * @dc: Current DC state
2187 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
2188 *
2189 * Return: void
2190 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Both hooks are required to locate VUPDATE and the current scanline. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	/* Nothing to guard without active stream resources on this pipe. */
	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
				       &vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else {
		/* Past VUPDATE start: wrap through the end of the frame. */
		lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
	}

	/* Calculate time until VUPDATE in microseconds.
	 * pix_clk_100hz is in units of 100 Hz, hence the 10000 factor.
	 */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;

	/* Position is in the range of vupdate start and end*/
	if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
		us_to_vupdate = 0;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Too close to VUPDATE: busy-wait past the remaining approach time
	 * plus the entire VUPDATE window.
	 */
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
2238
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
	if (!pipe || pipe->top_pipe)
		return;

	/* Prevent cursor lock from stalling out cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
		/* Route the lock through the DMUB HW lock manager, keyed by
		 * this pipe's OPP instance.
		 */
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
					lock,
					&hw_locks,
					&inst_flags);
	} else
		/* Otherwise lock directly at the MPC. */
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
}
2264
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)2265 static bool wait_for_reset_trigger_to_occur(
2266 struct dc_context *dc_ctx,
2267 struct timing_generator *tg)
2268 {
2269 bool rc = false;
2270
2271 DC_LOGGER_INIT(dc_ctx->logger);
2272
2273 /* To avoid endless loop we wait at most
2274 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2275 const uint32_t frames_to_wait_on_triggered_reset = 10;
2276 int i;
2277
2278 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2279
2280 if (!tg->funcs->is_counter_moving(tg)) {
2281 DC_ERROR("TG counter is not moving!\n");
2282 break;
2283 }
2284
2285 if (tg->funcs->did_triggered_reset_occur(tg)) {
2286 rc = true;
2287 /* usually occurs at i=1 */
2288 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2289 i);
2290 break;
2291 }
2292
2293 /* Wait for one frame. */
2294 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2295 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2296 }
2297
2298 if (false == rc)
2299 DC_ERROR("GSL: Timeout on reset trigger!\n");
2300
2301 return rc;
2302 }
2303
/*
 * Reduce a 64-bit numerator/denominator pair by common prime factors.
 *
 * Strips every prime up to 997 from both values for as long as it divides
 * both exactly.  When @checkUint32Bounary is true, reduction stops (with
 * success) as soon as both values fit in 32 bits; when it is false the
 * pair is fully reduced and the result is unconditionally true.
 *
 * The computed flag is boolean, so return bool rather than uint64_t;
 * the caller only ever tests it against false.
 *
 * Return: true when the reduction satisfied the requested 32-bit bound
 * (or no bound was requested), false otherwise.
 */
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool checkUint32Bounary)
{
	int i;
	bool ret = checkUint32Bounary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Already within the 32-bit bound: stop early. */
		if (checkUint32Bounary &&
			num <= max_int32 && denom <= max_int32) {
			ret = true;
			break;
		}

		/* Strip this prime while it divides both values exactly.
		 * div_u64_rem is used for 32-bit platform support.
		 */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2353
is_low_refresh_rate(struct pipe_ctx * pipe)2354 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2355 {
2356 uint32_t master_pipe_refresh_rate =
2357 pipe->stream->timing.pix_clk_100hz * 100 /
2358 pipe->stream->timing.h_total /
2359 pipe->stream->timing.v_total;
2360 return master_pipe_refresh_rate <= 30;
2361 }
2362
get_clock_divider(struct pipe_ctx * pipe,bool account_low_refresh_rate)2363 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2364 bool account_low_refresh_rate)
2365 {
2366 uint32_t clock_divider = 1;
2367 uint32_t numpipes = 1;
2368
2369 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2370 clock_divider *= 2;
2371
2372 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2373 clock_divider *= 2;
2374
2375 while (pipe->next_odm_pipe) {
2376 pipe = pipe->next_odm_pipe;
2377 numpipes++;
2378 }
2379 clock_divider *= numpipes;
2380
2381 return clock_divider;
2382 }
2383
/*
 * Align the DP DTO pixel clocks of a synchronization group to the
 * embedded panel's clock, using phase/modulo pairs derived from the
 * vblank_alignment_dto_params packed config value.
 *
 * Returns the index of the master pipe (the embedded panel if present,
 * otherwise the first successfully overridden pipe), or -1 on failure.
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk = 0;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	DC_LOGGER_INIT(dc_ctx->logger);

	hw_crtc_timing = kzalloc_objs(*hw_crtc_timing, MAX_PIPES);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* Unpack the embedded panel timing from the packed 64-bit
		 * config: bits [46:32] h_total, [62:48] v_total,
		 * [31:0] pixel clock in 100 Hz units.
		 */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* Embedded panel is the sync master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*(uint64_t)100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				/* Scale the non-embedded DTO by the ratio of
				 * the embedded and this pipe's frame size.
				 */
				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				/* Read back the clock actually programmed. */
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2473
/*
 * Synchronize the vblanks of a group of pipes by aligning their DP DTOs
 * and then their TG vblank positions against the master pipe.
 * Note: group_index is currently unused here.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0, master;

	DC_LOGGER_INIT(dc_ctx->logger);

	/* Temporarily enlarge the DPG dimensions on follower pipes while
	 * their timing shifts during alignment; bail out if any OTG is off.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			/* NOTE(review): this statement is NOT guarded by the if
			 * above (no braces), so vblank_synchronized is set for
			 * every pipe in the loop, including non-synchronizable
			 * ones - confirm this is intended.
			 */
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the real DPG active dimensions. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2538
/*
 * Synchronize the timing generators of a group of pipes by arming a
 * reset trigger on every follower TG against pipe 0's TG and waiting
 * for it to fire.  SubVP phantom pipes are excluded throughout.
 * Note: group_index is currently unused here.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	struct dc_state *state,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0;

	DC_LOGGER_INIT(dc_ctx->logger);

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Temporarily enlarge the DPG dimensions on follower pipes while
	 * their timing shifts; bail out if any OTG is disabled.
	 */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm every follower TG to reset against pipe 0's TG. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the real DPG active dimensions. */
	for (i = 1; i < group_size; i++) {
		if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2621
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2622 void dcn10_enable_per_frame_crtc_position_reset(
2623 struct dc *dc,
2624 int group_size,
2625 struct pipe_ctx *grouped_pipes[])
2626 {
2627 struct dc_context *dc_ctx = dc->ctx;
2628 int i;
2629
2630 DC_LOGGER_INIT(dc_ctx->logger);
2631
2632 DC_SYNC_INFO("Setting up\n");
2633 for (i = 0; i < group_size; i++)
2634 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2635 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2636 grouped_pipes[i]->stream_res.tg,
2637 0,
2638 &grouped_pipes[i]->stream->triggered_crtc_reset);
2639
2640 DC_SYNC_INFO("Waiting for trigger\n");
2641
2642 for (i = 0; i < group_size; i++)
2643 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2644
2645 DC_SYNC_INFO("Multi-display sync is complete\n");
2646 }
2647
/*
 * Read the MC system aperture registers and convert them into the byte
 * addresses expected by HUBP system-aperture programming.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Default address is a page number: << 12 yields the byte address. */
	apt->sys_default.quad_part = physical_page_number.quad_part << 12;
	/* NOTE(review): << 18 implies the aperture LOW/HIGH registers hold
	 * 256 KiB-aligned values - confirm against the register spec.
	 */
	apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
2671
2672 /* Temporary read settings, future will get values from kmd directly */
/*
 * Read the GPUVM context-0 page table registers and translate the page
 * table base from UMA space into the DCN address space.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	dcn10_hubbub_read_fb_aperture(hws->ctx->dc->res_pool->hubbub, &fb_base_value, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* FB base/offset registers hold 16 MiB-aligned values (<< 24). */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2715
2716
dcn10_program_pte_vm(struct dce_hwseq * hws,struct hubp * hubp)2717 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2718 {
2719 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2720 struct vm_system_aperture_param apt = {0};
2721 struct vm_context0_param vm0 = {0};
2722
2723 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2724 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2725
2726 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2727 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2728 }
2729
/*
 * Power up and clock a plane's HUBP/OPP path ahead of programming it in
 * dcn10_update_dchubp_dpp(). Ordering matters: power/clock enables must
 * precede any register programming of the pipe.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Revert the DEGVIDCN10_253 workaround now that a plane is coming up. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* Mirror MMHUB VM aperture/context into DCHUB when GPU VM is in use. */
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Enable the flip interrupt only on the top pipe, when requested. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2768
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2769 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2770 {
2771 int i = 0;
2772 struct dpp_grph_csc_adjustment adjust;
2773 memset(&adjust, 0, sizeof(adjust));
2774 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2775
2776
2777 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2778 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2779 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2780 adjust.temperature_matrix[i] =
2781 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2782 } else if (pipe_ctx->plane_state &&
2783 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2784 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2785 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2786 adjust.temperature_matrix[i] =
2787 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2788 }
2789
2790 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2791 }
2792
2793
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2794 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2795 {
2796 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2797 if (pipe_ctx->top_pipe) {
2798 struct pipe_ctx *top = pipe_ctx->top_pipe;
2799
2800 while (top->top_pipe)
2801 top = top->top_pipe; // Traverse to top pipe_ctx
2802 if (top->plane_state && top->plane_state->layer_index == 0 && !top->plane_state->global_alpha)
2803 // Global alpha used by top plane for PIP overlay
2804 // Pre-multiplied/per-pixel alpha used by MPO
2805 // Check top plane's global alpha to ensure layer_index > 0 not caused by PIP
2806 return true; // MPO in use and front plane not hidden
2807 }
2808 }
2809 return false;
2810 }
2811
/*
 * Program the output CSC with the R/G/B brightness offsets (matrix[3],
 * matrix[7], matrix[11]) forced to zero to fix MPO brightness on the rear
 * plane, then restore the caller's matrix.
 *
 * Fix vs. original: save and restore each offset individually. The old
 * code restored all three slots from matrix[3], which silently clobbered
 * matrix[7] and matrix[11] if the offsets were ever unequal (they are
 * documented to be equal by the caller, so behavior is unchanged in the
 * normal case).
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	uint16_t r_bias = matrix[3];
	uint16_t g_bias = matrix[7];
	uint16_t b_bias = matrix[11];

	matrix[3] = 0;
	matrix[7] = 0;
	matrix[11] = 0;
	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
	matrix[3] = r_bias;
	matrix[7] = g_bias;
	matrix[11] = b_bias;
}
2825
/*
 * Program the output color space conversion for this pipe: either the
 * stream's explicit adjustment matrix (with the DCN1 rear-plane MPO
 * brightness workaround when applicable) or the colorspace default.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	if (!pipe_ctx->stream->csc_color_matrix.enable_adjustment) {
		/* No explicit adjustment: use the colorspace default. */
		if (dpp->funcs->dpp_set_csc_default != NULL)
			dpp->funcs->dpp_set_csc_default(dpp, colorspace);
		return;
	}

	if (dpp->funcs->dpp_set_csc_adjustment == NULL)
		return;

	/* MPO is broken with RGB colorspaces when the OCSC matrix
	 * brightness offset >= 0 on DCN1, because the OCSC sits before
	 * the MPC: blending adds the offsets from front + rear to the
	 * rear plane.
	 *
	 * Fix is to set the RGB bias to 0 on the rear plane; the top
	 * plane's black-value pixels then add the offset instead of
	 * rear + front. matrix[3/7/11] all carry the same offset value.
	 */
	if ((int16_t)matrix[3] > 0 &&
			dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace))
		dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
	else
		dpp->funcs->dpp_set_csc_adjustment(dpp, matrix);
}
2857
dcn10_update_dpp(struct dpp * dpp,struct dc_plane_state * plane_state)2858 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2859 {
2860 struct dc_bias_and_scale bns_params = {0};
2861
2862 // program the input csc
2863 dpp->funcs->dpp_setup(dpp,
2864 plane_state->format,
2865 EXPANSION_MODE_ZERO,
2866 plane_state->input_csc_color_matrix,
2867 plane_state->color_space,
2868 NULL);
2869
2870 //set scale and bias registers
2871 build_prescale_params(&bns_params, plane_state);
2872 if (dpp->funcs->dpp_program_bias_and_scale)
2873 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2874 }
2875
dcn10_update_visual_confirm_color(struct dc * dc,struct pipe_ctx * pipe_ctx,int mpcc_id)2876 void dcn10_update_visual_confirm_color(struct dc *dc,
2877 struct pipe_ctx *pipe_ctx,
2878 int mpcc_id)
2879 {
2880 struct mpc *mpc = dc->res_pool->mpc;
2881
2882 if (mpc->funcs->set_bg_color) {
2883 mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2884 }
2885 }
2886
/*
 * Program MPCC blending for a plane. On non-full updates only the blend
 * config is refreshed; on full updates the plane's MPCC is removed from
 * and re-inserted into the MPC tree. The visual-confirm color is updated
 * on both paths.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only matters when blending over a bottom pipe. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			/* Combine per-pixel alpha with the plane-wide gain. */
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree */
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2964
update_scaler(struct pipe_ctx * pipe_ctx)2965 static void update_scaler(struct pipe_ctx *pipe_ctx)
2966 {
2967 bool per_pixel_alpha =
2968 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2969
2970 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2971 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2972 /* scaler configuration */
2973 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2974 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2975 }
2976
/*
 * Central per-plane programming path: update DPP clock dividers/DTO,
 * HUBP DLG/TTU/RQ registers, input CSC, MPCC blending, scaler, viewport,
 * cursor, gamut remap, output CSC and surface config, gated on the
 * plane's update flags. Finally commits the new surface address and
 * unblanks the HUBP if the pipe tree is visible.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp, change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 * dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is common block shared by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* The programmed surface size is the post-scale viewport. */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
			plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
			plane_state->update_flags.bits.per_pixel_alpha_change ||
			plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
			plane_state->update_flags.bits.per_pixel_alpha_change ||
			plane_state->update_flags.bits.global_alpha_change ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Re-apply cursor programming when a cursor surface is set. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		if (dc->hwss.abort_cursor_offload_update)
			dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);

		dc->hwss.set_cursor_attribute(pipe_ctx);
		dc->hwss.set_cursor_position(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	/* Commit the new surface address last, after all config is in place. */
	dc->hwss.update_plane_addr(dc, pipe_ctx);

	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
3141
/*
 * Blank or unblank a pipe's pixel data at the timing generator and
 * manage ABM state across the transition: ABM is disabled before
 * blanking and re-enabled after unblanking.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank first, then restore the stream's ABM level. */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		/* Disable ABM immediately, wait for VBLANK, then blank. */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
3185
dcn10_set_hdr_multiplier(struct pipe_ctx * pipe_ctx)3186 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
3187 {
3188 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
3189 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
3190 struct custom_float_format fmt;
3191
3192 fmt.exponenta_bits = 6;
3193 fmt.mantissa_bits = 12;
3194 fmt.sign = true;
3195
3196
3197 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
3198 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
3199
3200 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
3201 pipe_ctx->plane_res.dpp, hw_mult);
3202 }
3203
/*
 * Program one pipe's front end: global sync/VTG/blanking for the top
 * pipe, then HUBP/DPP programming, HDR multiplier and transfer
 * functions, gated on the plane's update flags.
 *
 * NOTE(review): pipe_ctx->plane_state is dereferenced unconditionally
 * below -- callers are expected to pass only pipes with a plane attached.
 */
void dcn10_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width,
				pipe_ctx->pipe_dlg_param.pstate_keepout);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	/* Power/clock the plane path up before programming it. */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
3252
/*
 * Wait on every enabled top pipe until its OTG passes through VBLANK and
 * back to VACTIVE, guaranteeing a VUPDATE has occurred and pending
 * double-buffered updates have taken effect.
 */
void dcn10_wait_for_pending_cleared(struct dc *dc,
		struct dc_state *context)
{
		struct pipe_ctx *pipe_ctx;
		struct timing_generator *tg;
		int i;

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe_ctx = &context->res_ctx.pipe_ctx[i];
			tg = pipe_ctx->stream_res.tg;

			/*
			 * Only wait for top pipe's tg pending bit
			 * Also skip if pipe is disabled.
			 */
			if (pipe_ctx->top_pipe ||
			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
			    !tg->funcs->is_tg_enabled(tg))
				continue;

			/*
			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
			 * For some reason waiting for OTG_UPDATE_PENDING cleared
			 * seems to not trigger the update right away, and if we
			 * lock again before VUPDATE then we don't get a separated
			 * operation.
			 */
			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
		}
}
3284
/*
 * Front-end work done after pipe locks are released: apply the OPTC
 * underflow workaround to plane-less streams, disable planes flagged for
 * removal, run optimize_bandwidth once if anything was disabled, and
 * apply the DEGVIDCN10_254 watermark-change workaround when needed.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	/* Tear down pipes flagged disabled in the new context. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* Optimize bandwidth at most once if any pipe was disabled. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3317
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3318 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3319 {
3320 uint8_t i;
3321
3322 for (i = 0; i < context->stream_count; i++) {
3323 if (context->streams[i]->timing.timing_3d_format
3324 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3325 /*
3326 * Disable stutter
3327 */
3328 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3329 break;
3330 }
3331 }
3332 }
3333
/*
 * Update clocks and program watermarks ahead of a configuration change
 * (update_clocks is called with safe_to_lower=false -- TODO confirm the
 * parameter's name against the clk_mgr interface), apply the stereo
 * frame-pack stutter workaround, and optionally report watermark ranges
 * to pplib. Sets dc->optimized_required from program_watermarks.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock requirement drops to zero. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	dc->optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3371
/*
 * Counterpart to dcn10_prepare_bandwidth(), run after a configuration
 * change has settled: update clocks (with the third argument true,
 * unlike prepare -- presumably allowing clocks to be lowered; confirm
 * against the clk_mgr interface), reprogram watermarks, and apply the
 * same stereo workaround and pplib watermark reporting.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock requirement drops to zero. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3410
/*
 * Program dynamic refresh rate (DRR) parameters -- min/mid/max vertical
 * totals -- on each given pipe, and set up the static-screen trigger
 * event when a DRR range is active.
 */
void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
	unsigned int event_triggers = 0x800;
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
	/* TODO: If multiple pipes are to be supported, you need
	 * some GSL stuff. Static screen triggers may be programmed differently
	 * as well.
	 */
	for (i = 0; i < num_pipes; i++) {
		/* dc_state_destruct() might null the stream resources, so fetch tg
		 * here first to avoid a race condition. The lifetime of the pointee
		 * itself (the timing_generator object) is not a problem here.
		 */
		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;

		if ((tg != NULL) && tg->funcs) {
			set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
			/* Only arm the static-screen trigger when DRR is active. */
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (tg->funcs->set_static_screen_control)
					tg->funcs->set_static_screen_control(
						tg, event_triggers, num_frames);
		}
	}
}
3445
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3446 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3447 int num_pipes,
3448 struct crtc_position *position)
3449 {
3450 int i = 0;
3451
3452 /* TODO: handle pipes > 1
3453 */
3454 for (i = 0; i < num_pipes; i++)
3455 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3456 }
3457
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3458 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3459 int num_pipes, const struct dc_static_screen_params *params)
3460 {
3461 unsigned int i;
3462 unsigned int triggers = 0;
3463
3464 if (params->triggers.surface_update)
3465 triggers |= 0x80;
3466 if (params->triggers.cursor_update)
3467 triggers |= 0x2;
3468 if (params->triggers.force_trigger)
3469 triggers |= 0x1;
3470
3471 for (i = 0; i < num_pipes; i++)
3472 pipe_ctx[i]->stream_res.tg->funcs->
3473 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3474 triggers, params->num_frames);
3475 }
3476
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3477 void dcn10_config_stereo_parameters(
3478 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3479 {
3480 enum view_3d_format view_format = stream->view_format;
3481 enum dc_timing_3d_format timing_3d_format =\
3482 stream->timing.timing_3d_format;
3483 bool non_stereo_timing = false;
3484
3485 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3486 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3487 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3488 non_stereo_timing = true;
3489
3490 if (non_stereo_timing == false &&
3491 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3492
3493 flags->PROGRAM_STEREO = 1;
3494 flags->PROGRAM_POLARITY = 1;
3495 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3496 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3497 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3498 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3499
3500 if (stream->link && stream->link->ddc) {
3501 enum display_dongle_type dongle = \
3502 stream->link->ddc->dongle_type;
3503
3504 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3505 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3506 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3507 flags->DISABLE_STEREO_DP_SYNC = 1;
3508 }
3509 }
3510 flags->RIGHT_EYE_POLARITY =\
3511 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3512 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3513 flags->FRAME_PACKED = 1;
3514 }
3515
3516 return;
3517 }
3518
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3519 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3520 {
3521 struct crtc_stereo_flags flags = { 0 };
3522 struct dc_stream_state *stream = pipe_ctx->stream;
3523
3524 dcn10_config_stereo_parameters(stream, &flags);
3525
3526 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3527 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3528 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3529 } else {
3530 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3531 }
3532
3533 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3534 pipe_ctx->stream_res.opp,
3535 flags.PROGRAM_STEREO == 1,
3536 &stream->timing);
3537
3538 pipe_ctx->stream_res.tg->funcs->program_stereo(
3539 pipe_ctx->stream_res.tg,
3540 &stream->timing,
3541 &flags);
3542
3543 return;
3544 }
3545
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3546 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3547 {
3548 int i;
3549
3550 for (i = 0; i < res_pool->pipe_count; i++) {
3551 if (res_pool->hubps[i]->inst == mpcc_inst)
3552 return res_pool->hubps[i];
3553 }
3554 ASSERT(false);
3555 return NULL;
3556 }
3557
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3558 void dcn10_wait_for_mpcc_disconnect(
3559 struct dc *dc,
3560 struct resource_pool *res_pool,
3561 struct pipe_ctx *pipe_ctx)
3562 {
3563 struct dce_hwseq *hws = dc->hwseq;
3564 int mpcc_inst;
3565
3566 if (dc->debug.sanity_checks) {
3567 hws->funcs.verify_allow_pstate_change_high(dc);
3568 }
3569
3570 if (!pipe_ctx->stream_res.opp)
3571 return;
3572
3573 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3574 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3575 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3576
3577 if (pipe_ctx->stream_res.tg &&
3578 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3579 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3580 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3581 hubp->funcs->set_blank(hubp, true);
3582 }
3583 }
3584
3585 if (dc->debug.sanity_checks) {
3586 hws->funcs.verify_allow_pstate_change_high(dc);
3587 }
3588
3589 }
3590
dcn10_dummy_display_power_gating(struct dc * dc,uint8_t controller_id,struct dc_bios * dcb,enum pipe_gating_control power_gating)3591 bool dcn10_dummy_display_power_gating(
3592 struct dc *dc,
3593 uint8_t controller_id,
3594 struct dc_bios *dcb,
3595 enum pipe_gating_control power_gating)
3596 {
3597 return true;
3598 }
3599
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3600 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3601 {
3602 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3603 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3604 bool flip_pending;
3605 struct dc *dc = pipe_ctx->stream->ctx->dc;
3606
3607 if (plane_state == NULL)
3608 return;
3609
3610 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3611 pipe_ctx->plane_res.hubp);
3612
3613 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3614
3615 if (!flip_pending)
3616 plane_state->status.current_address = plane_state->status.requested_address;
3617
3618 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3619 tg->funcs->is_stereo_left_eye) {
3620 plane_state->status.is_right_eye =
3621 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3622 }
3623
3624 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3625 struct dce_hwseq *hwseq = dc->hwseq;
3626 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3627 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3628
3629 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3630 struct hubbub *hubbub = dc->res_pool->hubbub;
3631
3632 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3633 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3634 }
3635 }
3636 }
3637
dcn10_update_dchub(struct dce_hwseq * hws,struct dchub_init_data * dh_data)3638 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3639 {
3640 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3641
3642 /* In DCN, this programming sequence is owned by the hubbub */
3643 hubbub->funcs->update_dchub(hubbub, dh_data);
3644 }
3645
/*
 * dcn10_set_cursor_position - Program the cursor position into HUBP and DPP.
 *
 * Translates the stream-space cursor position into plane space, compensating
 * for plane scaling, viewport clipping (opt-in), rotation and horizontal
 * mirror, then writes the result to both the HUBP and DPP cursor registers.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream,
	};
	bool pipe_split_on = false;
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;
	bool is_primary_plane = (pipe_ctx->plane_state->layer_index == 0);

	/* NOTE(review): clip_x is computed before pipe_split_on is updated
	 * below, so at this point pipe_split_on is always false — confirm
	 * whether the !pipe_split_on term is intended to be a no-op here. */
	int clip_x = (pos_cpy.use_viewport_for_clip && is_primary_plane &&
		!odm_combine_on && !pipe_split_on && param.viewport.x != 0)
		? param.viewport.x : pipe_ctx->plane_state->clip_rect.x;
	int clip_width = pipe_ctx->plane_state->clip_rect.width;

	/* Detect pipe split: a top/bottom pipe exists and the viewport does
	 * not cover the full source rect. */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			pipe_split_on = true;
		}
	}

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor and clip offset from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		/* 90/270 rotation swaps axes, so width scales against height
		 * and vice versa. */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
		clip_x = (clip_x - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		clip_width = clip_width * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
	}

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */
	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* Hide the cursor for video-progressive surfaces. */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* Hide the cursor when this pipe is allowed to drop it (e.g. the
	 * cursor is outside this pipe's region). */
	if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;


	if (param.rotation == ROTATION_ANGLE_0) {

		if (param.mirror) {
			/*
			 * The plane is split into multiple viewports.
			 * The combination of all viewports span the
			 * entirety of the clip rect.
			 *
			 * For no pipe_split, viewport_width is represents
			 * the full width of the clip_rect, so we can just
			 * mirror it.
			 */
			pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
		}
	}
	// Swap axis and mirror horizontally
	else if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 * pos_cpy.y as the 180 degree rotation case below,
		 * but use pos_cpy.x as our input because we are rotating
		 * 270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			/* Normalize with the smaller of the two viewport.y
			 * offsets, since this runs once per split pipe. */
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		if (!param.mirror) {
			/*
			 * The plane is split into multiple viewports.
			 * The combination of all viewports span the
			 * entirety of the clip rect.
			 *
			 * For no pipe_split, viewport_width is represents
			 * the full width of the clip_rect, so we can just
			 * mirror it.
			 */
			pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3869
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3870 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3871 {
3872 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3873
3874 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3875 pipe_ctx->plane_res.hubp, attributes);
3876 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3877 pipe_ctx->plane_res.dpp, attributes);
3878 }
3879
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3880 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3881 {
3882 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3883 struct fixed31_32 multiplier;
3884 struct dpp_cursor_attributes opt_attr = { 0 };
3885 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3886 struct custom_float_format fmt;
3887
3888 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3889 return;
3890
3891 fmt.exponenta_bits = 5;
3892 fmt.mantissa_bits = 10;
3893 fmt.sign = true;
3894
3895 if (sdr_white_level > 80) {
3896 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3897 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3898 }
3899
3900 opt_attr.scale = hw_scale;
3901 opt_attr.bias = 0;
3902
3903 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3904 pipe_ctx->plane_res.dpp, &opt_attr);
3905 }
3906
3907 /*
3908 * apply_front_porch_workaround TODO FPGA still need?
3909 *
3910 * This is a workaround for a bug that has existed since R5xx and has not been
3911 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3912 */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3913 static void apply_front_porch_workaround(
3914 struct dc_crtc_timing *timing)
3915 {
3916 if (timing->flags.INTERLACE == 1) {
3917 if (timing->v_front_porch < 2)
3918 timing->v_front_porch = 2;
3919 } else {
3920 if (timing->v_front_porch < 1)
3921 timing->v_front_porch = 1;
3922 }
3923 }
3924
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3925 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3926 {
3927 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3928 struct dc_crtc_timing patched_crtc_timing;
3929 int vesa_sync_start;
3930 int asic_blank_end;
3931 int interlace_factor;
3932
3933 patched_crtc_timing = *dc_crtc_timing;
3934 apply_front_porch_workaround(&patched_crtc_timing);
3935
3936 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3937
3938 vesa_sync_start = patched_crtc_timing.v_addressable +
3939 patched_crtc_timing.v_border_bottom +
3940 patched_crtc_timing.v_front_porch;
3941
3942 asic_blank_end = (patched_crtc_timing.v_total -
3943 vesa_sync_start -
3944 patched_crtc_timing.v_border_top)
3945 * interlace_factor;
3946
3947 return asic_blank_end -
3948 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3949 }
3950
/*
 * Compute the scanline window [start_line, end_line] at which the VUPDATE
 * interrupt should fire, normalized into [0, v_total).
 */
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	int v_total = pipe_ctx->stream->timing.v_total;
	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	/* Wrap a possibly negative VUPDATE offset into the frame. */
	if (vupdate_pos >= 0)
		*start_line = vupdate_pos - ((vupdate_pos / v_total) * v_total);
	else
		*start_line = vupdate_pos + ((-vupdate_pos / v_total) + 1) * v_total - 1;
	*end_line = (*start_line + 2) % v_total;
}
3966
/*
 * Translate the stream's periodic-interrupt line offset into a TG scanline
 * window [start_line, end_line], relative to VUPDATE or VSYNC.
 */
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;

	switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
	case START_V_UPDATE:
		/* Pull the requested offset one line toward VUPDATE before
		 * adding the VUPDATE-from-VSYNC offset. */
		if (vline_pos > 0)
			vline_pos--;
		else if (vline_pos < 0)
			vline_pos++;

		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
		/* Wrap a possibly negative position into [0, v_total). */
		if (vline_pos >= 0)
			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
		else
			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		*end_line = (*start_line + 2) % timing->v_total;
		break;
	case START_V_SYNC:
		/* vsync is line 0 so start_line is just the requested offset */
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
		break;
	default:
		ASSERT(0);
		break;
	}
}
3995
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3996 void dcn10_setup_periodic_interrupt(
3997 struct dc *dc,
3998 struct pipe_ctx *pipe_ctx)
3999 {
4000 struct timing_generator *tg = pipe_ctx->stream_res.tg;
4001 uint32_t start_line = 0;
4002 uint32_t end_line = 0;
4003
4004 dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
4005
4006 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
4007 }
4008
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)4009 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
4010 {
4011 struct timing_generator *tg = pipe_ctx->stream_res.tg;
4012 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
4013
4014 if (start_line < 0) {
4015 ASSERT(0);
4016 start_line = 0;
4017 }
4018
4019 if (tg->funcs->setup_vertical_interrupt2)
4020 tg->funcs->setup_vertical_interrupt2(tg, start_line);
4021 }
4022
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)4023 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
4024 struct dc_link_settings *link_settings)
4025 {
4026 struct encoder_unblank_param params = {0};
4027 struct dc_stream_state *stream = pipe_ctx->stream;
4028 struct dc_link *link = stream->link;
4029 struct dce_hwseq *hws = link->dc->hwseq;
4030
4031 /* only 3 items below are used by unblank */
4032 params.timing = pipe_ctx->stream->timing;
4033
4034 params.link_settings.link_rate = link_settings->link_rate;
4035
4036 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
4037 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
4038 params.timing.pix_clk_100hz /= 2;
4039 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
4040 }
4041
4042 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
4043 hws->funcs.edp_backlight_control(link, true);
4044 }
4045 }
4046
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)4047 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
4048 const uint8_t *custom_sdp_message,
4049 unsigned int sdp_message_size)
4050 {
4051 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
4052 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
4053 pipe_ctx->stream_res.stream_enc,
4054 custom_sdp_message,
4055 sdp_message_size);
4056 }
4057 }
/*
 * Validate a requested DISPCLK/DPPCLK frequency against the clock manager's
 * limits, record it in the current context and trigger a clock update.
 */
enum dc_status dcn10_set_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		uint32_t clk_khz,
		uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc_clock_config clock_cfg = {0};

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
			context, clock_type, &clock_cfg);

	/* Reject requests outside the valid range or below the level the
	 * current bandwidth configuration requires. */
	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/* Record the request so the update_clocks pass below applies it. */
	switch (clock_type) {
	case DC_CLOCK_TYPE_DISPCLK:
		current_clocks->dispclk_khz = clk_khz;
		break;
	case DC_CLOCK_TYPE_DPPCLK:
		current_clocks->dppclk_khz = clk_khz;
		break;
	default:
		return DC_ERROR_UNEXPECTED;
	}

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
				context, true);

	return DC_OK;
}
4096
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)4097 void dcn10_get_clock(struct dc *dc,
4098 enum dc_clock_type clock_type,
4099 struct dc_clock_config *clock_cfg)
4100 {
4101 struct dc_state *context = dc->current_state;
4102
4103 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
4104 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
4105
4106 }
4107
dcn10_get_dcc_en_bits(struct dc * dc,int * dcc_en_bits)4108 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
4109 {
4110 struct resource_pool *pool = dc->res_pool;
4111 int i;
4112
4113 for (i = 0; i < pool->pipe_count; i++) {
4114 struct hubp *hubp = pool->hubps[i];
4115 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
4116
4117 hubp->funcs->hubp_read_state(hubp);
4118
4119 if (!s->blank_en)
4120 dcc_en_bits[i] = s->dcc_en ? 1 : 0;
4121 }
4122 }
4123
4124 /**
4125 * dcn10_reset_surface_dcc_and_tiling - Set DCC and tiling in DCN to their disable mode.
4126 *
4127 * @pipe_ctx: Pointer to the pipe context structure.
4128 * @plane_state: Surface state
4129 * @clear_tiling: If true set tiling to Linear, otherwise does not change tiling
4130 *
4131 * This function is responsible for call the HUBP block to disable DCC and set
4132 * tiling to the linear mode.
4133 */
dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx * pipe_ctx,struct dc_plane_state * plane_state,bool clear_tiling)4134 void dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
4135 struct dc_plane_state *plane_state,
4136 bool clear_tiling)
4137 {
4138 struct hubp *hubp = pipe_ctx->plane_res.hubp;
4139
4140 if (!hubp)
4141 return;
4142
4143 /* if framebuffer is tiled, disable tiling */
4144 if (clear_tiling && hubp->funcs->hubp_clear_tiling)
4145 hubp->funcs->hubp_clear_tiling(hubp);
4146
4147 /* force page flip to see the new content of the framebuffer */
4148 hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
4149 &plane_state->address,
4150 true);
4151 }
4152