// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include <drm/drm_print.h>

#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"

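/*
 * Snapshot of the parameters communicated to the PUnit through the two
 * XELPDP_INITIATE_PMDEMAND_REQUEST registers. Each value is capped to the
 * width of its register field before being programmed.
 */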
struct pmdemand_params {
	u16 qclk_gv_bw;
	u8 voltage_index;
	u8 qclk_gv_index;
	u8 active_pipes;
	u8 active_dbufs;	/* pre-Xe3 only */
	/* Total number of active non type C phys, from active_combo_phys_mask */
	u8 active_phys;
	u8 plls;
	u16 cdclk_freq_mhz;
	/* max from ddi_clocks[] */
	u16 ddiclk_max;
	u8 scalers;		/* pre-Xe3 only */
};

struct intel_pmdemand_state {
	struct intel_global_state base;

	/* Maintain a persistent list of port clocks across all crtcs */
	int ddi_clocks[I915_MAX_PIPES];

	/* Maintain a persistent mask of the active non type C phys */
	u16 active_combo_phys_mask;

	/* Parameters to be configured in the pmdemand registers */
	struct pmdemand_params params;
};

struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_pmdemand_state, base);
}

static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return NULL;

	return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};

static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_global_obj_state(state,
						  &display->pmdemand.obj);

	if (IS_ERR(pmdemand_state))
		return ERR_CAST(pmdemand_state);

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_old_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_new_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

int intel_pmdemand_init(struct intel_display *display)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
		/* Wa_14016740474 */
		intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}

void intel_pmdemand_init_early(struct intel_display *display)
{
	mutex_init(&display->pmdemand.lock);
	init_waitqueue_head(&display->pmdemand.waitqueue);
}

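/*
 * Set or clear the given encoder's phy in the tracked mask of active
 * non type C phys. Type C phys are deliberately not tracked here.
 */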
void
intel_pmdemand_update_phys_mask(struct intel_display *display,
				struct intel_encoder *encoder,
				struct intel_pmdemand_state *pmdemand_state,
				bool set_bit)
{
	enum phy phy;

	if (DISPLAY_VER(display) < 14)
		return;

	if (!encoder)
		return;

	if (intel_encoder_is_tc(encoder))
		return;

	phy = intel_encoder_to_phy(encoder);

	if (set_bit)
		pmdemand_state->active_combo_phys_mask |= BIT(phy);
	else
		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}

void
intel_pmdemand_update_port_clock(struct intel_display *display,
				 struct intel_pmdemand_state *pmdemand_state,
				 enum pipe pipe, int port_clock)
{
	if (DISPLAY_VER(display) < 14)
		return;

	pmdemand_state->ddi_clocks[pipe] = port_clock;
}

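/*
 * Refresh the per-pipe DDI clock table from the new crtc states, then
 * record the overall maximum, converted from kHz to MHz (rounded up).
 */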
static void
intel_pmdemand_update_max_ddiclk(struct intel_display *display,
				 struct intel_atomic_state *state,
				 struct intel_pmdemand_state *pmdemand_state)
{
	int max_ddiclk = 0;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_pmdemand_update_port_clock(display, pmdemand_state,
						 crtc->pipe,
						 new_crtc_state->port_clock);

	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

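/*
 * Update the phy mask for a single connector: clear the bit based on the
 * old crtc state, or set it based on the new one, skipping crtcs that are
 * not (or were not) active.
 */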
static void
intel_pmdemand_update_connector_phys(struct intel_display *display,
				     struct intel_atomic_state *state,
				     struct drm_connector_state *conn_state,
				     bool set_bit,
				     struct intel_pmdemand_state *pmdemand_state)
{
	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
	struct intel_crtc_state *crtc_state;

	if (!crtc)
		return;

	if (set_bit)
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	else
		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!crtc_state->hw.active)
		return;

	intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
					set_bit);
}

static void
intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
					 struct intel_atomic_state *state,
					 struct intel_pmdemand_state *pmdemand_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/* First clear the active phys in the old connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     old_conn_state, false,
						     pmdemand_state);

		/* Then set the active phys in the new connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     new_conn_state, true,
						     pmdemand_state);
	}

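	/* The phy count reported to the PUnit is capped at 7 */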
	pmdemand_state->params.active_phys =
		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
		      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
				  struct intel_encoder *encoder)
{
	return encoder && intel_encoder_is_tc(encoder);
}

static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_encoder *old_encoder =
			to_intel_encoder(old_conn_state->best_encoder);
		struct intel_encoder *new_encoder =
			to_intel_encoder(new_conn_state->best_encoder);

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		if (old_encoder == new_encoder ||
		    (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
		     intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
			continue;

		return true;
	}

	return false;
}

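/*
 * Check whether anything feeding the pmdemand parameters has changed:
 * QGV peak bandwidth, active pipes, enabled dbuf slices (pre-Xe3), cdclk
 * frequency or voltage, port clocks, or the set of active non type C phys.
 */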
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state, *old_bw_state;
	const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	struct intel_crtc *crtc;
	int i;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);
	if (new_bw_state && new_bw_state->qgv_point_peakbw !=
	    old_bw_state->qgv_point_peakbw)
		return true;

	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (new_dbuf_state &&
	    new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
		return true;

	if (DISPLAY_VER(display) < 30) {
		if (new_dbuf_state &&
		    new_dbuf_state->enabled_slices !=
		    old_dbuf_state->enabled_slices)
			return true;
	}

	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	if (new_cdclk_state &&
	    (new_cdclk_state->actual.cdclk !=
	     old_cdclk_state->actual.cdclk ||
	     new_cdclk_state->actual.voltage_level !=
	     old_cdclk_state->actual.voltage_level))
		return true;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
			return true;

	return intel_pmdemand_connector_needs_update(state);
}

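/*
 * Compute the new pmdemand parameters from the bw, dbuf and cdclk global
 * states. The global pmdemand state is serialized when a modeset is
 * allowed, and merely locked otherwise.
 */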
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	if (DISPLAY_VER(display) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* Firmware calculates the qclk_gv_index, so the requirement is set to 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	if (DISPLAY_VER(display) < 30) {
		new_pmdemand_state->params.active_dbufs =
			min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
	} else {
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
	}

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		new_cdclk_state->actual.voltage_level;
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);

	intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);

	/*
	 * The PLL count starts at 1 to account for the CDCLK PLL.
	 * TODO: Also account for the genlock filter once it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Set scalers to the maximum, as the actual count cannot be
	 * determined during flips and fastsets without taking the global
	 * state locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
	else
		return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

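/*
 * Any previously initiated pmdemand request must have completed (the
 * request-enable bit cleared and no transaction still in flight) before
 * the request registers can be accessed again.
 */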
static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
	return !(intel_de_wait_for_clear(display,
					 XELPDP_INITIATE_PMDEMAND_REQUEST(1),
					 XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
		 intel_de_wait_for_clear(display,
					 GEN12_DCPR_STATUS_1,
					 XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}

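/*
 * Seed the software pmdemand parameters from the current hardware
 * register values, e.g. as part of the initial state readout.
 */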
void
intel_pmdemand_init_pmdemand_params(struct intel_display *display,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(display) < 14)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display))) {
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);

	if (DISPLAY_VER(display) >= 30) {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
	} else {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
		pmdemand_state->params.active_dbufs =
			REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);

		pmdemand_state->params.scalers =
			REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
	}

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool intel_pmdemand_req_complete(struct intel_display *display)
{
	return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
		 XELPDP_PMDEMAND_REQ_ENABLE);
}

static void intel_pmdemand_wait(struct intel_display *display)
{
	if (!wait_event_timeout(display->pmdemand.waitqueue,
				intel_pmdemand_req_complete(display),
				msecs_to_jiffies_timeout(10)))
		drm_err(display->drm,
			"timed out waiting for Punit PM Demand Response\n");
}

/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct intel_display *display,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
	if (DISPLAY_VER(display) >= 30)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static void
intel_pmdemand_update_params(struct intel_display *display,
			     const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter update happens in two steps: a pre-plane
	 * and a post-plane update. During the pre-plane update, the display
	 * engine might still be processing old operations, so to avoid
	 * unexpected performance issues, program the pmdemand parameters
	 * with the higher of the old and new values. Once things have
	 * settled, apply the new parameter values as part of the post-plane
	 * update.
	 *
	 * If the pmdemand parameters are updated without a modeset being
	 * allowed, the updates cannot be serialized, which means parallel
	 * atomic commits may also be modifying the pmdemand parameters. In
	 * that case the current register values have to be taken into
	 * account as well: the pre-plane update programs the max of the
	 * old, new and current register values, and the post-plane update
	 * programs the max of the new and current register values.
	 */

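	/*
	 * Worked example with hypothetical values: a serialized commit
	 * changing cdclk_freq_mhz from 652 to 307 programs max(652, 307) =
	 * 652 in the pre-plane update, then settles on 307 in the
	 * post-plane update (where old == NULL).
	 */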
#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

	/* Set 1 */
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Set 2 */
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

	if (DISPLAY_VER(display) >= 30) {
		update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
	} else {
		update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
		update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);

		update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	}

#undef update_reg
}

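/*
 * Program both pmdemand request registers and, if any value actually
 * changed, initiate a new request and wait for the PUnit response.
 */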
static void
intel_pmdemand_program_params(struct intel_display *display,
			      const struct intel_pmdemand_state *new,
			      const struct intel_pmdemand_state *old,
			      bool serialized)
{
	bool changed = false;
	u32 reg1, mod_reg1;
	u32 reg2, mod_reg2;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
	mod_reg1 = reg1;

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
	mod_reg2 = reg2;

	intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
				     serialized);

	if (reg1 != mod_reg1) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
			       mod_reg1);
		changed = true;
	}

	if (reg2 != mod_reg2) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       mod_reg2);
		changed = true;
	}

	/* Initiate the pmdemand request only if the register values changed */
	if (!changed)
		goto unlock;

	drm_dbg_kms(display->drm,
		    "initiate pmdemand request values: (0x%x 0x%x)\n",
		    mod_reg1, mod_reg2);

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old)
{
	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}

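/*
 * Pre-plane step: program the "worst case" combination of the old and new
 * pmdemand parameters so the transition itself remains safe.
 */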
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state,
				      old_pmdemand_state,
				      intel_atomic_global_state_is_serialized(state));
}

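/*
 * Post-plane step: the old parameters are dropped (old == NULL), so only
 * what the new state actually requires is programmed.
 */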
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
				      intel_atomic_global_state_is_serialized(state));
}
677