// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include <drm/drm_print.h>

#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"

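/*
 * Parameters communicated to the PUnit via the PMDEMAND request
 * registers, tracked as a snapshot in software. Fields marked
 * "pre-Xe3 only" are not programmed on display version 30+.
 */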
struct pmdemand_params {
	u16 qclk_gv_bw;
	u8 voltage_index;
	u8 qclk_gv_index;
	u8 active_pipes;
	u8 active_dbufs;	/* pre-Xe3 only */
	/* Total number of active non-TC PHYs, from active_combo_phys_mask */
	u8 active_phys;
	u8 plls;
	u16 cdclk_freq_mhz;
	/* max from ddi_clocks[] */
	u16 ddiclk_max;
	u8 scalers;		/* pre-Xe3 only */
};

struct intel_pmdemand_state {
	struct intel_global_state base;

	/* Maintain a persistent list of port clocks across all crtcs */
	int ddi_clocks[I915_MAX_PIPES];

	/* Maintain a persistent mask of the active non-TC (combo) PHYs */
	u16 active_combo_phys_mask;

	/* Parameters to be configured in the pmdemand registers */
	struct pmdemand_params params;
};

struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_pmdemand_state, base);
}

static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return NULL;

	return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};

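/*
 * Accessors for the pmdemand global state: the plain getter acquires
 * the global state object (adding it to the commit), while the
 * _old/_new variants only return the state if it is already part of
 * this atomic commit, and NULL otherwise.
 */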
static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_global_obj_state(state,
						  &display->pmdemand.obj);

	if (IS_ERR(pmdemand_state))
		return ERR_CAST(pmdemand_state);

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_old_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_new_global_obj_state(state,
						      &display->pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

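/*
 * Allocate and register the pmdemand global state object. On display
 * version 14.00 steppings before C0, also apply Wa_14016740474, which
 * disables the pmdemand response timeout via a DCPR chicken bit.
 */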
int intel_pmdemand_init(struct intel_display *display)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
		/* Wa_14016740474 */
		intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}

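/*
 * Early init of the pmdemand locking primitives: the mutex serializes
 * all pmdemand register transactions, and the waitqueue is used by
 * intel_pmdemand_wait() to sleep until a request completes.
 */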
void intel_pmdemand_init_early(struct intel_display *display)
{
	mutex_init(&display->pmdemand.lock);
	init_waitqueue_head(&display->pmdemand.waitqueue);
}

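/*
 * Track the active non-TC (combo) PHYs in active_combo_phys_mask.
 * pmdemand only exists on display version 14+, and Type-C PHYs are not
 * counted in the PMDEMAND PHYS field, so TC encoders are skipped.
 */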
void
intel_pmdemand_update_phys_mask(struct intel_display *display,
				struct intel_encoder *encoder,
				struct intel_pmdemand_state *pmdemand_state,
				bool set_bit)
{
	enum phy phy;

	if (DISPLAY_VER(display) < 14)
		return;

	if (!encoder)
		return;

	if (intel_encoder_is_tc(encoder))
		return;

	phy = intel_encoder_to_phy(encoder);

	if (set_bit)
		pmdemand_state->active_combo_phys_mask |= BIT(phy);
	else
		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}

void
intel_pmdemand_update_port_clock(struct intel_display *display,
				 struct intel_pmdemand_state *pmdemand_state,
				 enum pipe pipe, int port_clock)
{
	if (DISPLAY_VER(display) < 14)
		return;

	pmdemand_state->ddi_clocks[pipe] = port_clock;
}

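/*
 * Refresh ddi_clocks[] from the crtc states in this commit and derive
 * the maximum port clock. port_clock is in kHz while the register
 * field takes MHz, hence the DIV_ROUND_UP() by 1000.
 */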
static void
intel_pmdemand_update_max_ddiclk(struct intel_display *display,
				 struct intel_atomic_state *state,
				 struct intel_pmdemand_state *pmdemand_state)
{
	int max_ddiclk = 0;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_pmdemand_update_port_clock(display, pmdemand_state,
						 crtc->pipe,
						 new_crtc_state->port_clock);

	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

static void
intel_pmdemand_update_connector_phys(struct intel_display *display,
				     struct intel_atomic_state *state,
				     struct drm_connector_state *conn_state,
				     bool set_bit,
				     struct intel_pmdemand_state *pmdemand_state)
{
	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
	struct intel_crtc_state *crtc_state;

	if (!crtc)
		return;

	if (set_bit)
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	else
		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!crtc_state->hw.active)
		return;

	intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
					set_bit);
}

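/*
 * For every connector changing in this commit, clear the PHY bits of
 * the old encoder assignment and set those of the new one, then clamp
 * the resulting PHY count to the field maximum of 7.
 */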
static void
intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
					 struct intel_atomic_state *state,
					 struct intel_pmdemand_state *pmdemand_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/* First clear the active phys in the old connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     old_conn_state, false,
						     pmdemand_state);

		/* Then set the active phys in the new connector state */
		intel_pmdemand_update_connector_phys(display, state,
						     new_conn_state, true,
						     pmdemand_state);
	}

	pmdemand_state->params.active_phys =
		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
		      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
				  struct intel_encoder *encoder)
{
	return encoder && intel_encoder_is_tc(encoder);
}

static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_encoder *old_encoder =
			to_intel_encoder(old_conn_state->best_encoder);
		struct intel_encoder *new_encoder =
			to_intel_encoder(new_conn_state->best_encoder);

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		if (old_encoder == new_encoder ||
		    (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
		     intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
			continue;

		return true;
	}

	return false;
}

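/*
 * A pmdemand update is needed whenever any of its inputs change: QGV
 * point peak bandwidth, active pipes, enabled dbuf slices (pre-Xe3),
 * cdclk frequency or voltage level, any port clock, or a connector
 * moving between TC and non-TC PHYs.
 */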
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state, *old_bw_state;
	const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	struct intel_crtc *crtc;
	int i;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);
	if (new_bw_state && new_bw_state->qgv_point_peakbw !=
	    old_bw_state->qgv_point_peakbw)
		return true;

	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (new_dbuf_state &&
	    new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
		return true;

	if (DISPLAY_VER(display) < 30) {
		if (new_dbuf_state &&
		    new_dbuf_state->enabled_slices !=
		    old_dbuf_state->enabled_slices)
			return true;
	}

	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	if (new_cdclk_state &&
	    (new_cdclk_state->actual.cdclk !=
	     old_cdclk_state->actual.cdclk ||
	     new_cdclk_state->actual.voltage_level !=
	     old_cdclk_state->actual.voltage_level))
		return true;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
			return true;

	return intel_pmdemand_connector_needs_update(state);
}

int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	if (DISPLAY_VER(display) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* Firmware calculates the qclk_gv_index, so set the requirement to 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	if (DISPLAY_VER(display) < 30) {
		new_pmdemand_state->params.active_dbufs =
			min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
	} else {
		new_pmdemand_state->params.active_pipes =
			min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
	}

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		new_cdclk_state->actual.voltage_level;
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);

	intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);

	/*
	 * Active PLLs start at 1 to account for the CDCLK PLL.
	 * TODO: Also account for the genlock filter once it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Set scalers to the maximum, as the actual count cannot be
	 * determined during flips and fastsets without taking the global
	 * state locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
	else
		return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

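/*
 * The previous pmdemand transaction is complete only once both the
 * REQ_ENABLE bit in request register 1 and the in-flight status bit in
 * GEN12_DCPR_STATUS_1 have cleared (waiting up to 10 ms for each).
 */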
static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
	return !(intel_de_wait_for_clear(display,
					 XELPDP_INITIATE_PMDEMAND_REQUEST(1),
					 XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
		 intel_de_wait_for_clear(display,
					 GEN12_DCPR_STATUS_1,
					 XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}

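/*
 * Readout: populate the software pmdemand parameters from the current
 * contents of the request registers, e.g. as left by the boot firmware.
 * If a previous transaction is unexpectedly still pending, reset the
 * parameters to zero instead.
 */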
void
intel_pmdemand_init_pmdemand_params(struct intel_display *display,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(display) < 14)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display))) {
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);

	if (DISPLAY_VER(display) >= 30) {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
	} else {
		pmdemand_state->params.active_pipes =
			REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
		pmdemand_state->params.active_dbufs =
			REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);

		pmdemand_state->params.scalers =
			REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
	}

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool intel_pmdemand_req_complete(struct intel_display *display)
{
	return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
		 XELPDP_PMDEMAND_REQ_ENABLE);
}

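/*
 * Wa_14024400148: instead of waiting for the response interrupt, poll
 * the request register until XELPDP_PMDEMAND_REQ_ENABLE clears, with a
 * 10 ms timeout.
 */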
static void intel_pmdemand_poll(struct intel_display *display)
{
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
				   XELPDP_PMDEMAND_REQ_ENABLE, 0,
				   50, timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"timed out waiting for Punit PM Demand Response within %ums (status 0x%08x)\n",
			timeout_ms, status);
}

static void intel_pmdemand_wait(struct intel_display *display)
{
	/* Wa_14024400148: for LNL, use the polling method */
	if (DISPLAY_VER(display) == 20) {
		intel_pmdemand_poll(display);
	} else {
		if (!wait_event_timeout(display->pmdemand.waitqueue,
					intel_pmdemand_req_complete(display),
					msecs_to_jiffies_timeout(10)))
			drm_err(display->drm,
				"timed out waiting for Punit PM Demand Response\n");
	}
}

/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct intel_display *display,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
	if (DISPLAY_VER(display) >= 30)
		return;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static void
intel_pmdemand_update_params(struct intel_display *display,
			     const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter updates happen in two steps: pre-plane and
	 * post-plane. During the pre-plane step, as the DE might still be
	 * handling some old operations, to avoid unexpected performance
	 * issues, program the pmdemand parameters with the higher of the
	 * old and new values. Once things have settled, program the new
	 * parameter values as part of the post-plane update.
	 *
	 * If the pmdemand params update happens without modeset allowed,
	 * the updates cannot be serialized, which implies that parallel
	 * atomic commits may affect the pmdemand parameters. In that case
	 * the current register values must be considered as well: the
	 * pre-plane step takes the max of the old, new and current register
	 * values, and the post-plane step takes the max of the new and
	 * current register values.
	 */

#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)
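
	/*
	 * Note how the merge semantics above fall out of update_reg(): the
	 * pre-plane update passes the old state, so each field becomes
	 * max3(old, new, current); the post-plane update passes old == NULL,
	 * reducing this to max(new, current); and for a serialized commit
	 * current_val is 0, so only the atomic state values matter.
	 */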

	/* Set 1 */
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Set 2 */
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

	if (DISPLAY_VER(display) >= 30) {
		update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
	} else {
		update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
		update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);

		update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	}

#undef update_reg
}

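/*
 * Read both request registers, merge in the parameters from the atomic
 * state, and write them back only if something changed. Setting
 * XELPDP_PMDEMAND_REQ_ENABLE then kicks off the request, and
 * intel_pmdemand_wait() blocks until the PUnit has responded.
 */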
static void
intel_pmdemand_program_params(struct intel_display *display,
			      const struct intel_pmdemand_state *new,
			      const struct intel_pmdemand_state *old,
			      bool serialized)
{
	bool changed = false;
	u32 reg1, mod_reg1;
	u32 reg2, mod_reg2;

	mutex_lock(&display->pmdemand.lock);
	if (drm_WARN_ON(display->drm,
			!intel_pmdemand_check_prev_transaction(display)))
		goto unlock;

	reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
	mod_reg1 = reg1;

	reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
	mod_reg2 = reg2;

	intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
				     serialized);

	if (reg1 != mod_reg1) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
			       mod_reg1);
		changed = true;
	}

	if (reg2 != mod_reg2) {
		intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       mod_reg2);
		changed = true;
	}

	/* Initiate the pmdemand request only if the register values changed */
	if (!changed)
		goto unlock;

	drm_dbg_kms(display->drm,
		    "initiate pmdemand request values: (0x%x 0x%x)\n",
		    mod_reg1, mod_reg2);

	intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(display);

unlock:
	mutex_unlock(&display->pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old)
{
	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}

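/*
 * Pre-plane step of the two-step pmdemand programming: apply the
 * element-wise max of the old and new parameters, so the requested
 * resources stay sufficient while old operations are still in flight.
 */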
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state,
				      old_pmdemand_state,
				      intel_atomic_global_state_is_serialized(state));
}

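/*
 * Post-plane step: the old configuration is gone, so program the final
 * new parameters (passing old == NULL drops the old values from the
 * max computed in intel_pmdemand_update_params()).
 */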
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(display) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
				      intel_atomic_global_state_is_serialized(state));
}
698