xref: /linux/drivers/gpu/drm/i915/display/intel_pmdemand.c (revision 13c072b8e91a5ccb5855ca1ba6fe3ea467dbf94d)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <linux/bitops.h>
7 
8 #include <drm/drm_print.h>
9 
10 #include "intel_atomic.h"
11 #include "intel_bw.h"
12 #include "intel_cdclk.h"
13 #include "intel_de.h"
14 #include "intel_display_jiffies.h"
15 #include "intel_display_regs.h"
16 #include "intel_display_trace.h"
17 #include "intel_display_utils.h"
18 #include "intel_display_wa.h"
19 #include "intel_pmdemand.h"
20 #include "intel_step.h"
21 #include "skl_watermark.h"
22 
/*
 * Snapshot of the values programmed into the two
 * XELPDP_INITIATE_PMDEMAND_REQUEST registers; see
 * intel_pmdemand_update_params() for the field <-> register-mask mapping.
 */
struct pmdemand_params {
	u16 qclk_gv_bw;		/* from intel_bw_qgv_point_peakbw() */
	u8 voltage_index;	/* from intel_cdclk_actual_voltage_level() */
	u8 qclk_gv_index;	/* kept 0: firmware calculates the index */
	u8 active_pipes;
	u8 active_dbufs;	/* pre-Xe3 only */
	/* Total number of non type C active phys from active_phys_mask */
	u8 active_phys;
	u8 plls;		/* active PLL count, incl. the CDCLK PLL */
	u16 cdclk_freq_mhz;
	/* max from ddi_clocks[] */
	u16 ddiclk_max;
	u8 scalers;		/* pre-Xe3 only */
};
37 
/*
 * Global (cross-crtc) pmdemand state, managed through the intel_global_state
 * machinery so it is duplicated/freed as part of atomic commits.
 */
struct intel_pmdemand_state {
	struct intel_global_state base;

	/* Maintain a persistent list of port clocks across all crtcs */
	int ddi_clocks[I915_MAX_PIPES];

	/* Maintain a persistent list of non type C phys mask */
	u16 active_combo_phys_mask;

	/* Parameters to be configured in the pmdemand registers */
	struct pmdemand_params params;
};
50 
/* Upcast a generic global obj state to the pmdemand-specific state. */
struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_pmdemand_state, base);
}
55 
56 static struct intel_global_state *
57 intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
58 {
59 	struct intel_pmdemand_state *pmdemand_state;
60 
61 	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
62 	if (!pmdemand_state)
63 		return NULL;
64 
65 	return &pmdemand_state->base;
66 }
67 
/* intel_global_state_funcs hook: free a duplicated pmdemand state. */
static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}
73 
/* Hooks used by the global atomic state machinery for the pmdemand obj. */
static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};
78 
79 static struct intel_pmdemand_state *
80 intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
81 {
82 	struct intel_display *display = to_intel_display(state);
83 	struct intel_global_state *pmdemand_state =
84 		intel_atomic_get_global_obj_state(state,
85 						  &display->pmdemand.obj);
86 
87 	if (IS_ERR(pmdemand_state))
88 		return ERR_CAST(pmdemand_state);
89 
90 	return to_intel_pmdemand_state(pmdemand_state);
91 }
92 
93 static struct intel_pmdemand_state *
94 intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
95 {
96 	struct intel_display *display = to_intel_display(state);
97 	struct intel_global_state *pmdemand_state =
98 		intel_atomic_get_old_global_obj_state(state,
99 						      &display->pmdemand.obj);
100 
101 	if (!pmdemand_state)
102 		return NULL;
103 
104 	return to_intel_pmdemand_state(pmdemand_state);
105 }
106 
107 static struct intel_pmdemand_state *
108 intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
109 {
110 	struct intel_display *display = to_intel_display(state);
111 	struct intel_global_state *pmdemand_state =
112 		intel_atomic_get_new_global_obj_state(state,
113 						      &display->pmdemand.obj);
114 
115 	if (!pmdemand_state)
116 		return NULL;
117 
118 	return to_intel_pmdemand_state(pmdemand_state);
119 }
120 
/*
 * intel_pmdemand_init - allocate and register the pmdemand global state
 * @display: display device
 *
 * Allocates the initial (zeroed) pmdemand state, registers it with the
 * global atomic state machinery and applies Wa_14016740474 where the
 * workaround database says it is needed.
 *
 * Returns: 0 on success, -ENOMEM if the state allocation fails.
 */
int intel_pmdemand_init(struct intel_display *display)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc_obj(*pmdemand_state);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	/* Wa_14016740474 */
	if (intel_display_wa(display, INTEL_DISPLAY_WA_14016740474))
		intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0,
			     DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}
140 
/*
 * Early init: set up the lock serializing pmdemand register transactions and
 * the waitqueue used to wait for Punit responses.
 */
void intel_pmdemand_init_early(struct intel_display *display)
{
	mutex_init(&display->pmdemand.lock);
	init_waitqueue_head(&display->pmdemand.waitqueue);
}
146 
147 void
148 intel_pmdemand_update_phys_mask(struct intel_display *display,
149 				struct intel_encoder *encoder,
150 				struct intel_pmdemand_state *pmdemand_state,
151 				bool set_bit)
152 {
153 	enum phy phy;
154 
155 	if (DISPLAY_VER(display) < 14)
156 		return;
157 
158 	if (!encoder)
159 		return;
160 
161 	if (intel_encoder_is_tc(encoder))
162 		return;
163 
164 	phy = intel_encoder_to_phy(encoder);
165 
166 	if (set_bit)
167 		pmdemand_state->active_combo_phys_mask |= BIT(phy);
168 	else
169 		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
170 }
171 
172 void
173 intel_pmdemand_update_port_clock(struct intel_display *display,
174 				 struct intel_pmdemand_state *pmdemand_state,
175 				 enum pipe pipe, int port_clock)
176 {
177 	if (DISPLAY_VER(display) < 14)
178 		return;
179 
180 	pmdemand_state->ddi_clocks[pipe] = port_clock;
181 }
182 
183 static void
184 intel_pmdemand_update_max_ddiclk(struct intel_display *display,
185 				 struct intel_atomic_state *state,
186 				 struct intel_pmdemand_state *pmdemand_state)
187 {
188 	int max_ddiclk = 0;
189 	const struct intel_crtc_state *new_crtc_state;
190 	struct intel_crtc *crtc;
191 	int i;
192 
193 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
194 		intel_pmdemand_update_port_clock(display, pmdemand_state,
195 						 crtc->pipe,
196 						 new_crtc_state->port_clock);
197 
198 	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
199 		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);
200 
201 	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
202 }
203 
204 static void
205 intel_pmdemand_update_connector_phys(struct intel_display *display,
206 				     struct intel_atomic_state *state,
207 				     struct drm_connector_state *conn_state,
208 				     bool set_bit,
209 				     struct intel_pmdemand_state *pmdemand_state)
210 {
211 	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
212 	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
213 	struct intel_crtc_state *crtc_state;
214 
215 	if (!crtc)
216 		return;
217 
218 	if (set_bit)
219 		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
220 	else
221 		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
222 
223 	if (!crtc_state->hw.active)
224 		return;
225 
226 	intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
227 					set_bit);
228 }
229 
230 static void
231 intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
232 					 struct intel_atomic_state *state,
233 					 struct intel_pmdemand_state *pmdemand_state)
234 {
235 	struct drm_connector_state *old_conn_state;
236 	struct drm_connector_state *new_conn_state;
237 	struct drm_connector *connector;
238 	int i;
239 
240 	for_each_oldnew_connector_in_state(&state->base, connector,
241 					   old_conn_state, new_conn_state, i) {
242 		if (!intel_connector_needs_modeset(state, connector))
243 			continue;
244 
245 		/* First clear the active phys in the old connector state */
246 		intel_pmdemand_update_connector_phys(display, state,
247 						     old_conn_state, false,
248 						     pmdemand_state);
249 
250 		/* Then set the active phys in new connector state */
251 		intel_pmdemand_update_connector_phys(display, state,
252 						     new_conn_state, true,
253 						     pmdemand_state);
254 	}
255 
256 	pmdemand_state->params.active_phys =
257 		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
258 		      7);
259 }
260 
261 static bool
262 intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
263 				  struct intel_encoder *encoder)
264 {
265 	return encoder && intel_encoder_is_tc(encoder);
266 }
267 
/*
 * Check whether any connector change in @state could alter the non type C
 * phy accounting, i.e. a modeset where the encoder changes and at least one
 * side of the transition is not a type C phy.
 */
static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_encoder *old_encoder =
			to_intel_encoder(old_conn_state->best_encoder);
		struct intel_encoder *new_encoder =
			to_intel_encoder(new_conn_state->best_encoder);

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/*
		 * No phy accounting change when the encoder is unchanged, or
		 * when both old and new encoders sit on type C phys (which
		 * pmdemand ignores).
		 */
		if (old_encoder == new_encoder ||
		    (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
		     intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
			continue;

		return true;
	}

	return false;
}
297 
298 static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
299 {
300 	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
301 	struct intel_crtc *crtc;
302 	int i;
303 
304 	if (intel_bw_pmdemand_needs_update(state))
305 		return true;
306 
307 	if (intel_dbuf_pmdemand_needs_update(state))
308 		return true;
309 
310 	if (intel_cdclk_pmdemand_needs_update(state))
311 		return true;
312 
313 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
314 					    new_crtc_state, i)
315 		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
316 			return true;
317 
318 	return intel_pmdemand_connector_needs_update(state);
319 }
320 
/*
 * intel_pmdemand_atomic_check - compute the new pmdemand parameters
 * @state: atomic state being checked
 *
 * If anything pmdemand-relevant changed, pull in the bw, dbuf and cdclk
 * global states and derive the new register parameters from them. Depending
 * on whether a modeset is allowed, the resulting global state is either
 * serialized or merely locked.
 *
 * Returns: 0 on success, or a negative error code from acquiring any of the
 * required global states.
 */
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	if (DISPLAY_VER(display) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* firmware will calculate the qclk_gv_index, requirement is set to 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = intel_bw_qgv_point_peakbw(new_bw_state);

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	/* Xe3 dropped dbuf tracking; pipes are clamped differently there. */
	if (DISPLAY_VER(display) < 30) {
		new_pmdemand_state->params.active_dbufs =
			min_t(u8, intel_dbuf_num_enabled_slices(new_dbuf_state), 3);
		new_pmdemand_state->params.active_pipes =
			min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), 3);
	} else {
		new_pmdemand_state->params.active_pipes =
			min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), INTEL_NUM_PIPES(display));
	}

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		intel_cdclk_actual_voltage_level(new_cdclk_state);
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(intel_cdclk_actual(new_cdclk_state), 1000);

	intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);

	/*
	 * Active_PLLs starts with 1 because of CDCLK PLL.
	 * TODO: Missing to account genlock filter when it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Setting scalers to max as it can not be calculated during flips and
	 * fastsets without taking global states locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
	else
		return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}
392 
393 static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
394 {
395 	return !(intel_de_wait_for_clear_ms(display,
396 					    XELPDP_INITIATE_PMDEMAND_REQUEST(1),
397 					    XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
398 		 intel_de_wait_for_clear_ms(display,
399 					    GEN12_DCPR_STATUS_1,
400 					    XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
401 }
402 
/*
 * intel_pmdemand_init_pmdemand_params - read back pmdemand state from hw
 * @display: display device
 * @pmdemand_state: state to populate
 *
 * Populate @pmdemand_state->params from the current contents of the two
 * XELPDP_INITIATE_PMDEMAND_REQUEST registers. If a previous transaction is
 * still pending (which should not happen, hence the WARN), the params are
 * zeroed instead.
 */
void
intel_pmdemand_init_pmdemand_params(struct intel_display *display,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(display) < 14)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display))) {
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);

	/* The pipes field layout differs on Xe3; dbufs/scalers are pre-Xe3. */
	if (DISPLAY_VER(display) >= 30) {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
	} else {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
		pmdemand_state->params.active_dbufs =
			REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);

		pmdemand_state->params.scalers =
			REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
	}

unlock:
	mutex_unlock(&display->pmdemand.lock);
}
454 
455 static bool intel_pmdemand_req_complete(struct intel_display *display)
456 {
457 	return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
458 		 XELPDP_PMDEMAND_REQ_ENABLE);
459 }
460 
/*
 * Poll for the Punit to clear the request enable bit (the Wa_14024400148
 * alternative to waiting on the interrupt-driven waitqueue). Only a timeout
 * is logged; the last observed register value is included in the message.
 */
static void intel_pmdemand_poll(struct intel_display *display)
{
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	ret = intel_de_wait_ms(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       XELPDP_PMDEMAND_REQ_ENABLE, 0,
			       timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"timed out waiting for Punit PM Demand Response within %ums (status 0x%08x)\n",
			timeout_ms, status);
}
476 
477 static void intel_pmdemand_wait(struct intel_display *display)
478 {
479 	/* Wa_14024400148 For lnl use polling method */
480 	if (DISPLAY_VER(display) == 20) {
481 		intel_pmdemand_poll(display);
482 	} else {
483 		if (!wait_event_timeout(display->pmdemand.waitqueue,
484 					intel_pmdemand_req_complete(display),
485 					msecs_to_jiffies_timeout(10)))
486 			drm_err(display->drm,
487 				"timed out waiting for Punit PM Demand Response\n");
488 	}
489 }
490 
/*
 * intel_pmdemand_program_dbuf - update only the dbuf count towards Punit
 * @display: display device
 * @dbuf_slices: bitmask of enabled dbuf slices
 *
 * Required to be programmed during Display Init Sequences. Programs the
 * active dbuf count (clamped to 3) into the pmdemand request registers and
 * initiates a request. No-op on Xe3+, which does not track dbufs.
 */
void intel_pmdemand_program_dbuf(struct intel_display *display,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
	if (DISPLAY_VER(display) >= 30)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	/* Setting the enable bit kicks off the request towards the Punit. */
	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}
517 
/*
 * Merge the pmdemand parameters into the register images *reg1/*reg2.
 * @new: parameters for the state being committed (always considered)
 * @old: pre-commit parameters, or NULL for the post-plane pass
 * @serialized: whether this commit holds all global state (if not, the
 *              current register field values are folded into the max too)
 */
static void
intel_pmdemand_update_params(struct intel_display *display,
			     const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter updates happens in two steps. Pre plane and
	 * post plane updates. During the pre plane, as DE might still be
	 * handling with some old operations, to avoid unexpected performance
	 * issues, program the pmdemand parameters with higher of old and new
	 * values. And then after once settled, use the new parameter values
	 * as part of the post plane update.
	 *
	 * If the pmdemand params update happens without modeset allowed, this
	 * means we can't serialize the updates. So that implies possibility of
	 * some parallel atomic commits affecting the pmdemand parameters. In
	 * that case, we need to consider the current values from the register
	 * as well. So in pre-plane case, we need to check the max of old, new
	 * and current register value if not serialized. In post plane update
	 * we need to consider max of new and current register value if not
	 * serialized
	 */

	/* Replace one register field with max(old, new, current) of @field. */
#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

	/* Set 1*/
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Set 2*/
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

	/* The pipes field layout differs on Xe3; dbufs/scalers are pre-Xe3. */
	if (DISPLAY_VER(display) >= 30) {
		update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
	} else {
		update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
		update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);

		update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	}

#undef update_reg
}
573 
/*
 * Perform one pmdemand transaction under the pmdemand lock: read the two
 * request registers, merge in the parameters via
 * intel_pmdemand_update_params(), and — only if the merged values actually
 * differ — write them back, set the request enable bit and wait for the
 * Punit response.
 */
static void
intel_pmdemand_program_params(struct intel_display *display,
			      const struct intel_pmdemand_state *new,
			      const struct intel_pmdemand_state *old,
			      bool serialized)
{
	bool changed = false;
	u32 reg1, mod_reg1;
	u32 reg2, mod_reg2;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
	mod_reg1 = reg1;

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
	mod_reg2 = reg2;

	intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
				     serialized);

	if (reg1 != mod_reg1) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
			       mod_reg1);
		changed = true;
	}

	if (reg2 != mod_reg2) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       mod_reg2);
		changed = true;
	}

	/* Initiate pm demand request only if register values are changed */
	if (!changed)
		goto unlock;

	drm_dbg_kms(display->drm,
		    "initiate pmdemand request values: (0x%x 0x%x)\n",
		    mod_reg1, mod_reg2);

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}
626 
627 static bool
628 intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
629 			     const struct intel_pmdemand_state *old)
630 {
631 	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
632 }
633 
634 void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
635 {
636 	struct intel_display *display = to_intel_display(state);
637 	const struct intel_pmdemand_state *new_pmdemand_state =
638 		intel_atomic_get_new_pmdemand_state(state);
639 	const struct intel_pmdemand_state *old_pmdemand_state =
640 		intel_atomic_get_old_pmdemand_state(state);
641 
642 	if (DISPLAY_VER(display) < 14)
643 		return;
644 
645 	if (!new_pmdemand_state ||
646 	    !intel_pmdemand_state_changed(new_pmdemand_state,
647 					  old_pmdemand_state))
648 		return;
649 
650 	WARN_ON(!new_pmdemand_state->base.changed);
651 
652 	intel_pmdemand_program_params(display, new_pmdemand_state,
653 				      old_pmdemand_state,
654 				      intel_atomic_global_state_is_serialized(state));
655 }
656 
657 void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
658 {
659 	struct intel_display *display = to_intel_display(state);
660 	const struct intel_pmdemand_state *new_pmdemand_state =
661 		intel_atomic_get_new_pmdemand_state(state);
662 	const struct intel_pmdemand_state *old_pmdemand_state =
663 		intel_atomic_get_old_pmdemand_state(state);
664 
665 	if (DISPLAY_VER(display) < 14)
666 		return;
667 
668 	if (!new_pmdemand_state ||
669 	    !intel_pmdemand_state_changed(new_pmdemand_state,
670 					  old_pmdemand_state))
671 		return;
672 
673 	WARN_ON(!new_pmdemand_state->base.changed);
674 
675 	intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
676 				      intel_atomic_global_state_is_serialized(state));
677 }
678