// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "skl_watermark.h"

static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return NULL;

	return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};

static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_global_obj_state(state,
						  &i915->display.pmdemand.obj);

	if (IS_ERR(pmdemand_state))
		return ERR_CAST(pmdemand_state);

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_old_global_obj_state(state,
						      &i915->display.pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_new_global_obj_state(state,
						      &i915->display.pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

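/*
 * intel_pmdemand_init - set up the pmdemand global state object
 * @i915: i915 device instance
 *
 * Allocate the initial pmdemand state, register it with the global atomic
 * state machinery, and apply Wa_14016740474 on the affected display 14.00
 * steppings.
 *
 * Returns: 0 on success, -ENOMEM on allocation failure.
 */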
int intel_pmdemand_init(struct drm_i915_private *i915)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	if (IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0))
		/* Wa_14016740474 */
		intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}

void intel_pmdemand_init_early(struct drm_i915_private *i915)
{
	mutex_init(&i915->display.pmdemand.lock);
	init_waitqueue_head(&i915->display.pmdemand.waitqueue);
}

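/*
 * intel_pmdemand_update_phys_mask - track which combo PHYs are active
 * @i915: i915 device instance
 * @encoder: encoder whose PHY changed state
 * @pmdemand_state: pmdemand state to update
 * @set_bit: true to mark the PHY active, false to clear it
 *
 * Only non-TC (combo) PHYs are tracked here; Type-C PHYs are excluded
 * from the pmdemand PHY mask.
 */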
void
intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
				struct intel_encoder *encoder,
				struct intel_pmdemand_state *pmdemand_state,
				bool set_bit)
{
	enum phy phy;

	if (DISPLAY_VER(i915) < 14)
		return;

	if (!encoder)
		return;

	if (intel_encoder_is_tc(encoder))
		return;

	phy = intel_encoder_to_phy(encoder);

	if (set_bit)
		pmdemand_state->active_combo_phys_mask |= BIT(phy);
	else
		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}

void
intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
				 struct intel_pmdemand_state *pmdemand_state,
				 enum pipe pipe, int port_clock)
{
	if (DISPLAY_VER(i915) < 14)
		return;

	pmdemand_state->ddi_clocks[pipe] = port_clock;
}

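/*
 * Find the highest DDI clock across all pipes in the new state and store
 * it in ddiclk_max in MHz (the per-pipe clocks are tracked in kHz).
 */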
static void
intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
				 struct intel_atomic_state *state,
				 struct intel_pmdemand_state *pmdemand_state)
{
	int max_ddiclk = 0;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_pmdemand_update_port_clock(i915, pmdemand_state,
						 crtc->pipe,
						 new_crtc_state->port_clock);

	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

static void
intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
				     struct intel_atomic_state *state,
				     struct drm_connector_state *conn_state,
				     bool set_bit,
				     struct intel_pmdemand_state *pmdemand_state)
{
	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
	struct intel_crtc_state *crtc_state;

	if (!crtc)
		return;

	if (set_bit)
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	else
		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!crtc_state->hw.active)
		return;

	intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state,
					set_bit);
}

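/*
 * Walk all connectors undergoing a modeset: clear the PHYs that were
 * active in the old state, set the ones active in the new state, then
 * clamp the resulting PHY count to 7 for the register field.
 */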
static void
intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
					 struct intel_atomic_state *state,
					 struct intel_pmdemand_state *pmdemand_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/* First clear the active PHYs in the old connector state */
		intel_pmdemand_update_connector_phys(i915, state,
						     old_conn_state, false,
						     pmdemand_state);

		/* Then set the active PHYs in the new connector state */
		intel_pmdemand_update_connector_phys(i915, state,
						     new_conn_state, true,
						     pmdemand_state);
	}

	pmdemand_state->params.active_phys =
		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
		      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
				  struct intel_encoder *encoder)
{
	return encoder && intel_encoder_is_tc(encoder);
}

static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_encoder *old_encoder =
			to_intel_encoder(old_conn_state->best_encoder);
		struct intel_encoder *new_encoder =
			to_intel_encoder(new_conn_state->best_encoder);

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		if (old_encoder == new_encoder ||
		    (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) &&
		     intel_pmdemand_encoder_has_tc_phy(i915, new_encoder)))
			continue;

		return true;
	}

	return false;
}

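/*
 * A pmdemand update is needed whenever anything feeding the pmdemand
 * parameters may have changed: QGV point peak bandwidth, active pipes,
 * enabled DBUF slices (pre-Xe3), CDCLK frequency or voltage level, any
 * port clock, or a connector moving between TC and non-TC PHYs.
 */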
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state, *old_bw_state;
	const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	struct intel_crtc *crtc;
	int i;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);
	if (new_bw_state && new_bw_state->qgv_point_peakbw !=
	    old_bw_state->qgv_point_peakbw)
		return true;

	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (new_dbuf_state &&
	    new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
		return true;

	if (DISPLAY_VER(display) < 30) {
		if (new_dbuf_state &&
		    new_dbuf_state->enabled_slices !=
		    old_dbuf_state->enabled_slices)
			return true;
	}

	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	if (new_cdclk_state &&
	    (new_cdclk_state->actual.cdclk !=
	     old_cdclk_state->actual.cdclk ||
	     new_cdclk_state->actual.voltage_level !=
	     old_cdclk_state->actual.voltage_level))
		return true;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
			return true;

	return intel_pmdemand_connector_needs_update(state);
}

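/*
 * intel_pmdemand_atomic_check - fill out the pmdemand state for a commit
 * @state: the atomic state to check
 *
 * Gather the bandwidth, DBUF, CDCLK, DDI clock and PHY parameters that
 * the PM firmware needs, then serialize or lock the global state
 * depending on whether the commit allows a modeset.
 *
 * Returns: 0 on success, a negative error code otherwise.
 */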
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	if (DISPLAY_VER(i915) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* Firmware will calculate the qclk_gv_index; set the requirement to 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	if (DISPLAY_VER(i915) < 30) {
		new_pmdemand_state->params.active_dbufs =
			min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
	} else {
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(i915));
	}

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		new_cdclk_state->actual.voltage_level;
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);

	intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state);

	/*
	 * Active_PLLs starts at 1 to account for the CDCLK PLL.
	 * TODO: account for the genlock filter once it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Set scalers to the maximum, since the scaler count cannot be
	 * calculated during flips and fastsets without taking the global
	 * state locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
	else
		return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

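/*
 * Returns true once the previous pmdemand request has fully completed:
 * the request-enable bit has cleared and DCPR no longer reports a
 * request in flight.
 */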
static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915)
{
	return !(intel_de_wait_for_clear(i915,
					 XELPDP_INITIATE_PMDEMAND_REQUEST(1),
					 XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
		 intel_de_wait_for_clear(i915,
					 GEN12_DCPR_STATUS_1,
					 XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}

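/*
 * intel_pmdemand_init_pmdemand_params - read back the current pmdemand state
 * @i915: i915 device instance
 * @pmdemand_state: pmdemand state to fill from the hardware
 *
 * Populate the software pmdemand parameters from the values currently
 * programmed in the PMDEMAND request registers. If a previous transaction
 * is unexpectedly still pending, the parameters are zeroed instead.
 */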
void
intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(i915) < 14)
		return;

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915))) {
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);

	if (DISPLAY_VER(i915) >= 30) {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
	} else {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
		pmdemand_state->params.active_dbufs =
			REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);

		pmdemand_state->params.scalers =
			REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
	}

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}

static bool intel_pmdemand_req_complete(struct drm_i915_private *i915)
{
	return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
		 XELPDP_PMDEMAND_REQ_ENABLE);
}

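/*
 * Wait up to 10 ms for the Punit PM Demand response, i.e. for the
 * request-enable bit to clear, logging an error on timeout.
 */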
static void intel_pmdemand_wait(struct drm_i915_private *i915)
{
	if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
				intel_pmdemand_req_complete(i915),
				msecs_to_jiffies_timeout(10)))
		drm_err(&i915->drm,
			"timed out waiting for Punit PM Demand Response\n");
}

/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
	if (DISPLAY_VER(i915) >= 30)
		return;

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915)))
		goto unlock;

	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(i915);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}

static void
intel_pmdemand_update_params(struct intel_display *display,
			     const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter updates happen in two steps: pre-plane and
	 * post-plane. During the pre-plane step, the display engine might
	 * still be handling some old operations, so to avoid unexpected
	 * performance issues, program the pmdemand parameters with the
	 * higher of the old and new values. Then, once things have settled,
	 * use the new parameter values as part of the post-plane update.
	 *
	 * If the pmdemand params update happens without a modeset allowed,
	 * we can't serialize the updates, which implies that parallel atomic
	 * commits may affect the pmdemand parameters. In that case we also
	 * need to consider the current values in the registers: in the
	 * pre-plane case, take the max of the old, new and current register
	 * values if not serialized; in the post-plane update, take the max
	 * of the new and current register values if not serialized.
	 */
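	/*
	 * For example, assuming a non-serialized pre-plane update where the
	 * old state has cdclk_freq_mhz = 307, the new state wants 652 and
	 * the register still reports 480, update_reg() below programs
	 * max3(307, 652, 480) = 652 into the CDCLK frequency field.
	 */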

#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

	/* Set 1 */
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Set 2 */
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

	if (DISPLAY_VER(display) >= 30) {
		update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
	} else {
		update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
		update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);

		update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	}

#undef update_reg
}

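/*
 * Read the two PMDEMAND request registers, merge in the new (and
 * optionally old) parameters, and initiate a request only if a register
 * value actually changed, then wait for the Punit to respond.
 */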
static void
intel_pmdemand_program_params(struct drm_i915_private *i915,
			      const struct intel_pmdemand_state *new,
			      const struct intel_pmdemand_state *old,
			      bool serialized)
{
	struct intel_display *display = &i915->display;
	bool changed = false;
	u32 reg1, mod_reg1;
	u32 reg2, mod_reg2;

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915)))
		goto unlock;

	reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
	mod_reg1 = reg1;

	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
	mod_reg2 = reg2;

	intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
				     serialized);

	if (reg1 != mod_reg1) {
		intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
			       mod_reg1);
		changed = true;
	}

	if (reg2 != mod_reg2) {
		intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       mod_reg2);
		changed = true;
	}

	/* Initiate the PM demand request only if the register values changed */
	if (!changed)
		goto unlock;

	drm_dbg_kms(&i915->drm,
		    "initiate pmdemand request values: (0x%x 0x%x)\n",
		    mod_reg1, mod_reg2);
	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(i915);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old)
{
	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}

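/*
 * Pre-plane update: program the pmdemand parameters using the max of the
 * old and new values (see intel_pmdemand_update_params()), so the demand
 * never drops below what the still-active old configuration requires.
 */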
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(i915) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(i915, new_pmdemand_state,
				      old_pmdemand_state,
				      intel_atomic_global_state_is_serialized(state));
}

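/*
 * Post-plane update: the old configuration is gone, so program the final
 * parameters from the new state only (old is passed as NULL).
 */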
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(i915) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(i915, new_pmdemand_state, NULL,
				      intel_atomic_global_state_is_serialized(state));
}