// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"

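/*
 * Snapshot of the parameters communicated to the Punit via the two
 * XELPDP_INITIATE_PMDEMAND_REQUEST registers programmed further below.
 */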
struct pmdemand_params {
	u16 qclk_gv_bw;
	u8 voltage_index;
	u8 qclk_gv_index;
	u8 active_pipes;
	u8 active_dbufs;	/* pre-Xe3 only */
	/* Total number of active non-type-C phys from active_combo_phys_mask */
	u8 active_phys;
	u8 plls;
	u16 cdclk_freq_mhz;
	/* max from ddi_clocks[] */
	u16 ddiclk_max;
	u8 scalers;		/* pre-Xe3 only */
};

struct intel_pmdemand_state {
	struct intel_global_state base;

	/* Maintain a persistent list of port clocks across all crtcs */
	int ddi_clocks[I915_MAX_PIPES];

	/* Maintain a persistent mask of active non-type-C phys */
	u16 active_combo_phys_mask;

	/* Parameters to be configured in the pmdemand registers */
	struct pmdemand_params params;
};

struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_pmdemand_state, base);
}

static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return NULL;

	return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};

static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_global_obj_state(state,
						  &display->pmdemand.obj);

	if (IS_ERR(pmdemand_state))
		return ERR_CAST(pmdemand_state);

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_old_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_new_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

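/*
 * Allocate the initial pmdemand state and register it as a global atomic
 * state object. Also applies Wa_14016740474 on the affected display
 * version 14.00 steppings.
 */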
int intel_pmdemand_init(struct intel_display *display)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
		/* Wa_14016740474 */
		intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}

void intel_pmdemand_init_early(struct intel_display *display)
{
	mutex_init(&display->pmdemand.lock);
	init_waitqueue_head(&display->pmdemand.waitqueue);
}

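/*
 * Set or clear @encoder's phy in the state's non-type-C phy mask. Type-C
 * phys are deliberately skipped, as pmdemand only tracks combo phys here.
 */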
void
intel_pmdemand_update_phys_mask(struct intel_display *display,
				struct intel_encoder *encoder,
				struct intel_pmdemand_state *pmdemand_state,
				bool set_bit)
{
	enum phy phy;

	if (DISPLAY_VER(display) < 14)
		return;

	if (!encoder)
		return;

	if (intel_encoder_is_tc(encoder))
		return;

	phy = intel_encoder_to_phy(encoder);

	if (set_bit)
		pmdemand_state->active_combo_phys_mask |= BIT(phy);
	else
		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}

void
intel_pmdemand_update_port_clock(struct intel_display *display,
				 struct intel_pmdemand_state *pmdemand_state,
				 enum pipe pipe, int port_clock)
{
	if (DISPLAY_VER(display) < 14)
		return;

	pmdemand_state->ddi_clocks[pipe] = port_clock;
}

static void
intel_pmdemand_update_max_ddiclk(struct intel_display *display,
				 struct intel_atomic_state *state,
				 struct intel_pmdemand_state *pmdemand_state)
{
	int max_ddiclk = 0;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_pmdemand_update_port_clock(display, pmdemand_state,
						 crtc->pipe,
						 new_crtc_state->port_clock);

	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

static void
intel_pmdemand_update_connector_phys(struct intel_display *display,
				     struct intel_atomic_state *state,
				     struct drm_connector_state *conn_state,
				     bool set_bit,
				     struct intel_pmdemand_state *pmdemand_state)
{
	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
	struct intel_crtc_state *crtc_state;

	if (!crtc)
		return;

	if (set_bit)
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	else
		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!crtc_state->hw.active)
		return;

	intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
					set_bit);
}

static void
intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
					 struct intel_atomic_state *state,
					 struct intel_pmdemand_state *pmdemand_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/* First clear the active phys in the old connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     old_conn_state, false,
						     pmdemand_state);

		/* Then set the active phys in the new connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     new_conn_state, true,
						     pmdemand_state);
	}

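	/*
	 * Clamp to 7, presumably the largest count the PHYS field of the
	 * pmdemand request register can hold.
	 */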
	pmdemand_state->params.active_phys =
		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
		      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
				  struct intel_encoder *encoder)
{
	return encoder && intel_encoder_is_tc(encoder);
}

static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_encoder *old_encoder =
			to_intel_encoder(old_conn_state->best_encoder);
		struct intel_encoder *new_encoder =
			to_intel_encoder(new_conn_state->best_encoder);

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		if (old_encoder == new_encoder ||
		    (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
		     intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
			continue;

		return true;
	}

	return false;
}

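/*
 * A pmdemand update is needed whenever any of the tracked inputs change:
 * QGV point peak bandwidth, active pipes, enabled dbuf slices (pre-Xe3),
 * cdclk frequency or voltage level, any port clock, or the set of active
 * non-type-C phys.
 */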
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state, *old_bw_state;
	const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	struct intel_crtc *crtc;
	int i;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);
	if (new_bw_state && new_bw_state->qgv_point_peakbw !=
	    old_bw_state->qgv_point_peakbw)
		return true;

	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (new_dbuf_state &&
	    new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
		return true;

	if (DISPLAY_VER(display) < 30) {
		if (new_dbuf_state &&
		    new_dbuf_state->enabled_slices !=
		    old_dbuf_state->enabled_slices)
			return true;
	}

	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	if (new_cdclk_state &&
	    (new_cdclk_state->actual.cdclk !=
	     old_cdclk_state->actual.cdclk ||
	     new_cdclk_state->actual.voltage_level !=
	     old_cdclk_state->actual.voltage_level))
		return true;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
			return true;

	return intel_pmdemand_connector_needs_update(state);
}

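/*
 * Compute the new pmdemand parameters from the bw, dbuf and cdclk global
 * states and stash them in the pmdemand state. The qclk_gv_index
 * requirement is left at 0 so that firmware calculates it, and scalers is
 * pinned at the maximum since it cannot be derived here without taking
 * further global state locks.
 */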
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	if (DISPLAY_VER(display) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* firmware will calculate the qclk_gv_index; the requirement is set to 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	if (DISPLAY_VER(display) < 30) {
		new_pmdemand_state->params.active_dbufs =
			min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
	} else {
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
	}

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		new_cdclk_state->actual.voltage_level;
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);

	intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);

	/*
	 * Active PLLs start at 1 to account for the CDCLK PLL.
	 * TODO: Also account for the genlock filter once it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Set scalers to the max, as the value cannot be calculated during
	 * flips and fastsets without taking the global state locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
	else
		return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

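/*
 * Any previous pmdemand request must have completed before a new one can
 * be issued: wait up to 10 ms each for the request-enable bit and the
 * in-flight status to clear.
 */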
static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
	return !(intel_de_wait_for_clear(display,
					 XELPDP_INITIATE_PMDEMAND_REQUEST(1),
					 XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
		 intel_de_wait_for_clear(display,
					 GEN12_DCPR_STATUS_1,
					 XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}

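/*
 * Seed the software pmdemand parameters from the current hardware
 * register values, e.g. for the initial state readout. If a previous
 * transaction is unexpectedly still pending, the parameters are zeroed
 * instead.
 */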
void
intel_pmdemand_init_pmdemand_params(struct intel_display *display,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(display) < 14)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display))) {
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);

	if (DISPLAY_VER(display) >= 30) {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
	} else {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
		pmdemand_state->params.active_dbufs =
			REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);

		pmdemand_state->params.scalers =
			REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
	}

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool intel_pmdemand_req_complete(struct intel_display *display)
{
	return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
		 XELPDP_PMDEMAND_REQ_ENABLE);
}

static void intel_pmdemand_wait(struct intel_display *display)
{
	if (!wait_event_timeout(display->pmdemand.waitqueue,
				intel_pmdemand_req_complete(display),
				msecs_to_jiffies_timeout(10)))
		drm_err(display->drm,
			"timed out waiting for Punit PM Demand Response\n");
}

/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct intel_display *display,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
	if (DISPLAY_VER(display) >= 30)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static void
intel_pmdemand_update_params(struct intel_display *display,
			     const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter updates happen in two steps: pre plane and
	 * post plane updates. During the pre plane update, as DE might still
	 * be handling some old operations, to avoid unexpected performance
	 * issues, program the pmdemand parameters with the higher of the old
	 * and new values. Then, once things have settled, use the new
	 * parameter values as part of the post plane update.
	 *
	 * If the pmdemand params update happens without modeset allowed, we
	 * can't serialize the updates, which implies that parallel atomic
	 * commits may affect the pmdemand parameters. In that case, the
	 * current register values must be considered as well. So in the pre
	 * plane case, take the max of the old, new and current register
	 * values if not serialized; in the post plane update, take the max
	 * of the new and current register values if not serialized.
	 */

#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)
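
	/*
	 * For example, update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK)
	 * expands to roughly:
	 *
	 *	u32 current_val = serialized ? 0 :
	 *		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, *reg1);
	 *	u32 old_val = old ? old->params.active_phys : 0;
	 *	u32 new_val = new->params.active_phys;
	 *
	 *	*reg1 &= ~XELPDP_PMDEMAND_PHYS_MASK;
	 *	*reg1 |= REG_FIELD_PREP(XELPDP_PMDEMAND_PHYS_MASK,
	 *				max3(old_val, new_val, current_val));
	 */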

	/* Set 1 */
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Set 2 */
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

	if (DISPLAY_VER(display) >= 30) {
		update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
	} else {
		update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
		update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);

		update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	}

#undef update_reg
}

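/*
 * Merge the new (and optionally old) parameters into the current register
 * values and, if anything changed, write them back and kick off a new
 * pmdemand request, waiting for the Punit to acknowledge it.
 */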
static void
intel_pmdemand_program_params(struct intel_display *display,
			      const struct intel_pmdemand_state *new,
			      const struct intel_pmdemand_state *old,
			      bool serialized)
{
	bool changed = false;
	u32 reg1, mod_reg1;
	u32 reg2, mod_reg2;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
	mod_reg1 = reg1;

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
	mod_reg2 = reg2;

	intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
				     serialized);

	if (reg1 != mod_reg1) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
			       mod_reg1);
		changed = true;
	}

	if (reg2 != mod_reg2) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       mod_reg2);
		changed = true;
	}

	/* Initiate the pm demand request only if the register values changed */
	if (!changed)
		goto unlock;

	drm_dbg_kms(display->drm,
		    "initiate pmdemand request values: (0x%x 0x%x)\n",
		    mod_reg1, mod_reg2);

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old)
{
	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}

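/*
 * Pre plane update: program the max of the old, new (and, if not
 * serialized, current) parameter values so the display engine isn't
 * starved while old operations are still in flight. The post plane
 * update below then drops to the final new values by passing a NULL old
 * state.
 */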
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state,
				      old_pmdemand_state,
				      intel_atomic_global_state_is_serialized(state));
}

void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
				      intel_atomic_global_state_is_serialized(state));
}