xref: /linux/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c (revision 965c995c9a4b395471ff48790a0155ee986ca405)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: AMD
24  *
25  */
26 
27 #include "reg_helper.h"
28 #include "core_types.h"
29 #include "dcn35_pg_cntl.h"
30 #include "dccg.h"
31 
32 #define TO_DCN_PG_CNTL(pg_cntl)\
33 	container_of(pg_cntl, struct dcn_pg_cntl, base)
34 
35 #define REG(reg) \
36 	(pg_cntl_dcn->regs->reg)
37 
38 #undef FN
39 #define FN(reg_name, field_name) \
40 	pg_cntl_dcn->pg_cntl_shift->field_name, pg_cntl_dcn->pg_cntl_mask->field_name
41 
42 #define CTX \
43 	pg_cntl_dcn->base.ctx
44 #define DC_LOGGER \
45 	pg_cntl->ctx->logger
46 
/*
 * Read the PGFSM power status of one DSC instance.
 *
 * DSC0..DSC3 map to power domains 16..19.  DOMAIN_PGFSM_PWR_STATUS == 0
 * means the domain is powered on (the _pg_control routines in this file
 * wait for 2 when gating), so "== 0" translates to "block enabled".
 *
 * Returns true when the DSC instance is powered on.  When the
 * debug.ignore_pg override is set, always reports "on" so callers never
 * see a gated block.
 */
static bool pg_cntl35_dsc_pg_status(struct pg_cntl *pg_cntl, unsigned int dsc_inst)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	if (pg_cntl->ctx->dc->debug.ignore_pg)
		return true;

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_GET(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 1: /* DSC1 */
		REG_GET(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 2: /* DSC2 */
		REG_GET(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 3: /* DSC3 */
		REG_GET(DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	/* pwr_status stays 0 (i.e. "on") if an invalid instance was passed. */
	return pwr_status == 0;
}
75 
/*
 * Power-gate or ungate one DSC instance (domains 16..19).
 *
 * The skip conditions (ignore_pg, disable_dsc_power_gate,
 * idle_optimizations_allowed) only suppress power-DOWN requests;
 * power-UP is always honored so a gated block can be recovered.
 *
 * The function is a no-op when the PGFSM already reports the requested
 * state.  Before touching a DOMAIN*_PG_CONFIG register, IP_REQUEST_EN
 * is asserted if it was not already set.
 * NOTE(review): IP_REQUEST_EN is not restored afterwards here —
 * presumably handled elsewhere in the power sequencing; confirm.
 */
void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	/* PGFSM status value to wait for: 0 = powered on, 2 = gated */
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl = 0;
	bool block_enabled = false;
	bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
		       pg_cntl->ctx->dc->debug.disable_dsc_power_gate ||
		       pg_cntl->ctx->dc->idle_optimizations_allowed;

	if (skip_pg && !power_on)
		return;

	/* Already in the requested state — nothing to do. */
	block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, dsc_inst);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 10000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 10000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 10000);
		break;
	case 3: /* DSC3 */
		REG_UPDATE(DOMAIN19_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 10000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	/* Track the new state in the software bookkeeping table. */
	if (dsc_inst < MAX_PIPES)
		pg_cntl->pg_pipe_res_enable[PG_DSC][dsc_inst] = power_on;
}
144 
/*
 * Read the PGFSM power status of one HUBP/DPP pair.
 *
 * HUBPn and DPPn share a power domain (domains 0..3 for pipes 0..3);
 * a status of 0 means the domain is powered on.
 *
 * Unlike pg_cntl35_dsc_pg_status(), this helper does not consult
 * debug.ignore_pg; the override handling lives in the _pg_control()
 * caller instead.
 */
static bool pg_cntl35_hubp_dpp_pg_status(struct pg_cntl *pg_cntl, unsigned int hubp_dpp_inst)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	switch (hubp_dpp_inst) {
	case 0:
		/* DPP0 & HUBP0 */
		REG_GET(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 1:
		/* DPP1 & HUBP1 */
		REG_GET(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 2:
		/* DPP2 & HUBP2 */
		REG_GET(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 3:
		/* DPP3 & HUBP3 */
		REG_GET(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	/* pwr_status stays 0 (i.e. "on") if an invalid instance was passed. */
	return pwr_status == 0;
}
174 
/*
 * Power-gate or ungate one HUBP/DPP pair (domains 0..3).
 *
 * The skip conditions (ignore_pg, disable_hubp_power_gate,
 * disable_dpp_power_gate, idle_optimizations_allowed) only suppress
 * power-DOWN requests; power-UP is always honored.
 *
 * No-op when the PGFSM already reports the requested state.
 * IP_REQUEST_EN is asserted before writing DOMAIN*_PG_CONFIG if it was
 * not already set (and not restored here — see note in
 * pg_cntl35_dsc_pg_control()).  On completion both the PG_HUBP and
 * PG_DPP bookkeeping entries for this pipe are updated, since the two
 * blocks share one domain.
 */
void pg_cntl35_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dpp_inst, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	/* PGFSM status value to wait for: 0 = powered on, 2 = gated */
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl;
	bool block_enabled;
	bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
		       pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
		       pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
		       pg_cntl->ctx->dc->idle_optimizations_allowed;

	if (skip_pg && !power_on)
		return;

	/* Already in the requested state — nothing to do. */
	block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, hubp_dpp_inst);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (hubp_dpp_inst) {
	case 0:
		/* DPP0 & HUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
		break;
	case 1:
		/* DPP1 & HUBP1 */
		REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
		break;
	case 2:
		/* DPP2 & HUBP2 */
		REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
		break;
	case 3:
		/* DPP3 & HUBP3 */
		REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	DC_LOG_DEBUG("HUBP DPP instance %d, power %s", hubp_dpp_inst,
		power_on ? "ON" : "OFF");

	if (hubp_dpp_inst < MAX_PIPES) {
		pg_cntl->pg_pipe_res_enable[PG_HUBP][hubp_dpp_inst] = power_on;
		pg_cntl->pg_pipe_res_enable[PG_DPP][hubp_dpp_inst] = power_on;
	}
}
237 
/*
 * Read the PGFSM power status of the HPO block (domain 25).
 * Returns true when the domain is powered on (status == 0).
 */
static bool pg_cntl35_hpo_pg_status(struct pg_cntl *pg_cntl)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	REG_GET(DOMAIN25_PG_STATUS,
			DOMAIN_PGFSM_PWR_STATUS, &pwr_status);

	return pwr_status == 0;
}
248 
/*
 * Power-gate or ungate the HPO block (domain 25).
 *
 * Unlike the DSC/HUBP controls, the debug overrides here suppress BOTH
 * directions (the function returns before any register access), and the
 * request is also abandoned when DOMAIN_POWER_FORCEON is set, i.e. the
 * domain is pinned on by something else.
 */
void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	/* PGFSM status value to wait for: 0 = powered on, 2 = gated */
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl;
	uint32_t power_forceon;
	bool block_enabled;

	if (pg_cntl->ctx->dc->debug.ignore_pg ||
		pg_cntl->ctx->dc->debug.disable_hpo_power_gate ||
		pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	/* Already in the requested state — nothing to do. */
	block_enabled = pg_cntl35_hpo_pg_status(pg_cntl);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	/* Domain forced on: do not fight the force-on request. */
	REG_GET(DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
	if (power_forceon)
		return;

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	REG_UPDATE(DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);

	pg_cntl->pg_res_enable[PG_HPO] = power_on;
}
285 
/*
 * Read the PGFSM power status of the IO/clock domain (domain 22, which
 * covers DCCG, DIO and DCIO per pg_cntl35_io_clk_pg_control()).
 * Returns true when the domain is powered on (status == 0).
 */
static bool pg_cntl35_io_clk_status(struct pg_cntl *pg_cntl)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	REG_GET(DOMAIN22_PG_STATUS,
		DOMAIN_PGFSM_PWR_STATUS, &pwr_status);

	return pwr_status == 0;
}
296 
/*
 * Power-gate or ungate domain 22, which contains DCCG, DIO and DCIO.
 *
 * Mirrors pg_cntl35_hpo_pg_control(): the overrides (ignore_pg,
 * idle_optimizations_allowed) block both directions, the request is
 * dropped if the domain is force-on, and all three bookkeeping entries
 * are updated together since they share the one domain.
 */
void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	/* PGFSM status value to wait for: 0 = powered on, 2 = gated */
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl;
	uint32_t power_forceon;
	bool block_enabled;

	if (pg_cntl->ctx->dc->debug.ignore_pg ||
		pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	/* Already in the requested state — nothing to do. */
	block_enabled = pg_cntl35_io_clk_status(pg_cntl);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	/* Domain forced on: do not fight the force-on request. */
	REG_GET(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
	if (power_forceon)
		return;

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	/* DCCG, DIO, DCIO */
	REG_UPDATE(DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);

	pg_cntl->pg_res_enable[PG_DCCG] = power_on;
	pg_cntl->pg_res_enable[PG_DIO] = power_on;
	pg_cntl->pg_res_enable[PG_DCIO] = power_on;
}
335 
/*
 * Read the PGFSM power status of domain 24 (MPC, OPP, OPTC and DWB per
 * pg_cntl35_plane_otg_pg_control()).
 * Returns true when the domain is powered on (status == 0).
 */
static bool pg_cntl35_plane_otg_status(struct pg_cntl *pg_cntl)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	REG_GET(DOMAIN24_PG_STATUS,
		DOMAIN_PGFSM_PWR_STATUS, &pwr_status);

	return pwr_status == 0;
}
346 
/*
 * Record the desired power state of one MPCC instance.
 *
 * Bookkeeping only — no PGFSM register is touched here.  MPCC shares
 * domain 24 with OPP/OPTC/DWB, and the actual gating decision is made
 * in pg_cntl35_plane_otg_pg_control() from these recorded states.
 * Ignored while idle optimizations are allowed.
 */
void pg_cntl35_mpcc_pg_control(struct pg_cntl *pg_cntl,
	unsigned int mpcc_inst, bool power_on)
{
	if (pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	if (mpcc_inst >= MAX_PIPES)
		return;

	pg_cntl->pg_pipe_res_enable[PG_MPCC][mpcc_inst] = power_on;
}
356 
/*
 * Record the desired power state of one OPP instance.
 *
 * Bookkeeping only — no PGFSM register is touched here; the state is
 * consumed by pg_cntl35_plane_otg_pg_control().  Ignored while idle
 * optimizations are allowed.
 */
void pg_cntl35_opp_pg_control(struct pg_cntl *pg_cntl,
	unsigned int opp_inst, bool power_on)
{
	if (pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	if (opp_inst >= MAX_PIPES)
		return;

	pg_cntl->pg_pipe_res_enable[PG_OPP][opp_inst] = power_on;
}
366 
/*
 * Record the desired power state of one OPTC instance.
 *
 * Bookkeeping only — no PGFSM register is touched here; the state is
 * consumed by pg_cntl35_plane_otg_pg_control().  Ignored while idle
 * optimizations are allowed.
 */
void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl,
	unsigned int optc_inst, bool power_on)
{
	if (pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	if (optc_inst >= MAX_PIPES)
		return;

	pg_cntl->pg_pipe_res_enable[PG_OPTC][optc_inst] = power_on;
}
376 
/*
 * Power-gate or ungate domain 24, which contains MPC, OPP, OPTC and DWB.
 *
 * Because several logical blocks share the domain, power-DOWN is only
 * performed when every MPCC/OPP/OPTC bookkeeping entry is disabled,
 * no pipe has an active stream, and DWB is off; otherwise the request
 * is dropped.  Power-UP has no such precondition.  The debug overrides
 * (ignore_pg, disable_optc_power_gate, idle_optimizations_allowed)
 * block both directions.  On success all shared bookkeeping entries
 * are updated to the new state.
 */
void pg_cntl35_plane_otg_pg_control(struct pg_cntl *pg_cntl, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	/* PGFSM status value to wait for: 0 = powered on, 2 = gated */
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl;
	int i;
	bool block_enabled;
	bool all_mpcc_disabled = true, all_opp_disabled = true;
	bool all_optc_disabled = true, all_stream_disabled = true;

	if (pg_cntl->ctx->dc->debug.ignore_pg ||
		pg_cntl->ctx->dc->debug.disable_optc_power_gate ||
		pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	/* Already in the requested state — nothing to do. */
	block_enabled = pg_cntl35_plane_otg_status(pg_cntl);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	/* Scan all pipes: any live stream or enabled MPCC/OPP/OPTC vetoes gating. */
	for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &pg_cntl->ctx->dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe_ctx) {
			if (pipe_ctx->stream)
				all_stream_disabled = false;
		}

		if (pg_cntl->pg_pipe_res_enable[PG_MPCC][i])
			all_mpcc_disabled = false;

		if (pg_cntl->pg_pipe_res_enable[PG_OPP][i])
			all_opp_disabled = false;

		if (pg_cntl->pg_pipe_res_enable[PG_OPTC][i])
			all_optc_disabled = false;
	}

	if (!power_on) {
		if (!all_mpcc_disabled || !all_opp_disabled || !all_optc_disabled
			|| !all_stream_disabled || pg_cntl->pg_res_enable[PG_DWB])
			return;
	}

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	/* MPC, OPP, OPTC, DWB */
	REG_UPDATE(DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);

	/* All blocks in the domain change state together. */
	for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
		pg_cntl->pg_pipe_res_enable[PG_MPCC][i] = power_on;
		pg_cntl->pg_pipe_res_enable[PG_OPP][i] = power_on;
		pg_cntl->pg_pipe_res_enable[PG_OPTC][i] = power_on;
	}
	pg_cntl->pg_res_enable[PG_DWB] = power_on;
}
441 
/*
 * Record the desired power state of the DWB block.
 *
 * Bookkeeping only — DWB shares domain 24 with MPC/OPP/OPTC and the
 * actual gating happens in pg_cntl35_plane_otg_pg_control(), which
 * reads this flag.  Ignored while idle optimizations are allowed.
 */
void pg_cntl35_dwb_pg_control(struct pg_cntl *pg_cntl, bool power_on)
{
	if (!pg_cntl->ctx->dc->idle_optimizations_allowed)
		pg_cntl->pg_res_enable[PG_DWB] = power_on;
}
449 
/*
 * Read the PGFSM power status of the memory domain (domain 23, used for
 * the DCHUBBUB/DCHVM bookkeeping in pg_cntl35_init_pg_status()).
 * Returns true when the domain is powered on (status == 0).
 */
static bool pg_cntl35_mem_status(struct pg_cntl *pg_cntl)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	REG_GET(DOMAIN23_PG_STATUS,
		DOMAIN_PGFSM_PWR_STATUS, &pwr_status);

	return pwr_status == 0;
}
460 
pg_cntl35_init_pg_status(struct pg_cntl * pg_cntl)461 void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl)
462 {
463 	int i = 0;
464 	bool block_enabled;
465 
466 	pg_cntl->pg_res_enable[PG_HPO] = pg_cntl35_hpo_pg_status(pg_cntl);
467 
468 	block_enabled = pg_cntl35_io_clk_status(pg_cntl);
469 	pg_cntl->pg_res_enable[PG_DCCG] = block_enabled;
470 	pg_cntl->pg_res_enable[PG_DIO] = block_enabled;
471 	pg_cntl->pg_res_enable[PG_DCIO] = block_enabled;
472 
473 	block_enabled = pg_cntl35_mem_status(pg_cntl);
474 	pg_cntl->pg_res_enable[PG_DCHUBBUB] = block_enabled;
475 	pg_cntl->pg_res_enable[PG_DCHVM] = block_enabled;
476 
477 	for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
478 		block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, i);
479 		pg_cntl->pg_pipe_res_enable[PG_HUBP][i] = block_enabled;
480 		pg_cntl->pg_pipe_res_enable[PG_DPP][i] = block_enabled;
481 
482 		block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, i);
483 		pg_cntl->pg_pipe_res_enable[PG_DSC][i] = block_enabled;
484 	}
485 
486 	block_enabled = pg_cntl35_plane_otg_status(pg_cntl);
487 	for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
488 		pg_cntl->pg_pipe_res_enable[PG_MPCC][i] = block_enabled;
489 		pg_cntl->pg_pipe_res_enable[PG_OPP][i] = block_enabled;
490 		pg_cntl->pg_pipe_res_enable[PG_OPTC][i] = block_enabled;
491 	}
492 	pg_cntl->pg_res_enable[PG_DWB] = block_enabled;
493 }
494 
pg_cntl35_print_pg_status(struct pg_cntl * pg_cntl,const char * debug_func,const char * debug_log)495 static void pg_cntl35_print_pg_status(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log)
496 {
497 	int i = 0;
498 	bool block_enabled = false;
499 
500 	DC_LOG_DEBUG("%s: %s", debug_func, debug_log);
501 
502 	DC_LOG_DEBUG("PG_CNTL status:\n");
503 
504 	block_enabled = pg_cntl35_io_clk_status(pg_cntl);
505 	DC_LOG_DEBUG("ONO0=%d (DCCG, DIO, DCIO)\n", block_enabled ? 1 : 0);
506 
507 	block_enabled = pg_cntl35_mem_status(pg_cntl);
508 	DC_LOG_DEBUG("ONO1=%d (DCHUBBUB, DCHVM, DCHUBBUBMEM)\n", block_enabled ? 1 : 0);
509 
510 	block_enabled = pg_cntl35_plane_otg_status(pg_cntl);
511 	DC_LOG_DEBUG("ONO2=%d (MPC, OPP, OPTC, DWB)\n", block_enabled ? 1 : 0);
512 
513 	block_enabled = pg_cntl35_hpo_pg_status(pg_cntl);
514 	DC_LOG_DEBUG("ONO3=%d (HPO)\n", block_enabled ? 1 : 0);
515 
516 	for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
517 		block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, i);
518 		DC_LOG_DEBUG("ONO%d=%d (DCHUBP%d, DPP%d)\n", 4 + i * 2, block_enabled ? 1 : 0, i, i);
519 
520 		block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, i);
521 		DC_LOG_DEBUG("ONO%d=%d (DSC%d)\n", 5 + i * 2, block_enabled ? 1 : 0, i);
522 	}
523 }
524 
/*
 * DCN3.5 pg_cntl vtable.  The *_pg_status helpers are file-local and
 * only reached through init_pg_status/print_pg_status.
 */
static const struct pg_cntl_funcs pg_cntl35_funcs = {
	.init_pg_status = pg_cntl35_init_pg_status,
	.dsc_pg_control = pg_cntl35_dsc_pg_control,
	.hubp_dpp_pg_control = pg_cntl35_hubp_dpp_pg_control,
	.hpo_pg_control = pg_cntl35_hpo_pg_control,
	.io_clk_pg_control = pg_cntl35_io_clk_pg_control,
	.plane_otg_pg_control = pg_cntl35_plane_otg_pg_control,
	.mpcc_pg_control = pg_cntl35_mpcc_pg_control,
	.opp_pg_control = pg_cntl35_opp_pg_control,
	.optc_pg_control = pg_cntl35_optc_pg_control,
	.dwb_pg_control = pg_cntl35_dwb_pg_control,
	.print_pg_status = pg_cntl35_print_pg_status
};
538 
pg_cntl35_create(struct dc_context * ctx,const struct pg_cntl_registers * regs,const struct pg_cntl_shift * pg_cntl_shift,const struct pg_cntl_mask * pg_cntl_mask)539 struct pg_cntl *pg_cntl35_create(
540 	struct dc_context *ctx,
541 	const struct pg_cntl_registers *regs,
542 	const struct pg_cntl_shift *pg_cntl_shift,
543 	const struct pg_cntl_mask *pg_cntl_mask)
544 {
545 	struct dcn_pg_cntl *pg_cntl_dcn = kzalloc(sizeof(*pg_cntl_dcn), GFP_KERNEL);
546 	struct pg_cntl *base;
547 
548 	if (pg_cntl_dcn == NULL) {
549 		BREAK_TO_DEBUGGER();
550 		return NULL;
551 	}
552 
553 	base = &pg_cntl_dcn->base;
554 	base->ctx = ctx;
555 	base->funcs = &pg_cntl35_funcs;
556 
557 	pg_cntl_dcn->regs = regs;
558 	pg_cntl_dcn->pg_cntl_shift = pg_cntl_shift;
559 	pg_cntl_dcn->pg_cntl_mask = pg_cntl_mask;
560 
561 	memset(base->pg_pipe_res_enable, 0, PG_HW_PIPE_RESOURCES_NUM_ELEMENT * MAX_PIPES * sizeof(bool));
562 	memset(base->pg_res_enable, 0, PG_HW_RESOURCES_NUM_ELEMENT * sizeof(bool));
563 
564 	return &pg_cntl_dcn->base;
565 }
566 
dcn_pg_cntl_destroy(struct pg_cntl ** pg_cntl)567 void dcn_pg_cntl_destroy(struct pg_cntl **pg_cntl)
568 {
569 	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(*pg_cntl);
570 
571 	kfree(pg_cntl_dcn);
572 	*pg_cntl = NULL;
573 }
574