xref: /linux/drivers/gpu/drm/i915/display/skl_watermark.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <linux/debugfs.h>
7 
8 #include <drm/drm_blend.h>
9 #include <drm/drm_print.h>
10 
11 #include "soc/intel_dram.h"
12 #include "i915_reg.h"
13 #include "i9xx_wm.h"
14 #include "intel_atomic.h"
15 #include "intel_bw.h"
16 #include "intel_cdclk.h"
17 #include "intel_crtc.h"
18 #include "intel_cursor_regs.h"
19 #include "intel_de.h"
20 #include "intel_display.h"
21 #include "intel_display_power.h"
22 #include "intel_display_regs.h"
23 #include "intel_display_rpm.h"
24 #include "intel_display_types.h"
25 #include "intel_display_utils.h"
26 #include "intel_fb.h"
27 #include "intel_fixed.h"
28 #include "intel_flipq.h"
29 #include "intel_pcode.h"
30 #include "intel_plane.h"
31 #include "intel_vblank.h"
32 #include "intel_wm.h"
33 #include "skl_prefill.h"
34 #include "skl_scaler.h"
35 #include "skl_universal_plane_regs.h"
36 #include "skl_watermark.h"
37 #include "skl_watermark_regs.h"
38 
39 struct intel_dbuf_state {
40 	struct intel_global_state base;
41 
42 	struct skl_ddb_entry ddb[I915_MAX_PIPES];
43 	unsigned int weight[I915_MAX_PIPES];
44 	u8 slices[I915_MAX_PIPES];
45 	u8 enabled_slices;
46 	u8 active_pipes;
47 	u8 mdclk_cdclk_ratio;
48 	bool joined_mbus;
49 };
50 
51 #define to_intel_dbuf_state(global_state) \
52 	container_of_const((global_state), struct intel_dbuf_state, base)
53 
54 #define intel_atomic_get_old_dbuf_state(state) \
55 	to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
56 #define intel_atomic_get_new_dbuf_state(state) \
57 	to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
58 
59 static void skl_sagv_disable(struct intel_display *display);
60 
61 /* Stores plane-specific WM parameters */
62 struct skl_wm_params {
63 	bool x_tiled, y_tiled;
64 	bool rc_surface;
65 	bool is_planar;
66 	u32 width;
67 	u8 cpp;
68 	u32 plane_pixel_rate;
69 	u32 y_min_scanlines;
70 	u32 plane_bytes_per_line;
71 	uint_fixed_16_16_t plane_blocks_per_line;
72 	uint_fixed_16_16_t y_tile_minimum;
73 	u32 linetime_us;
74 	u32 dbuf_block_size;
75 };
76 
77 u8 intel_enabled_dbuf_slices_mask(struct intel_display *display)
78 {
79 	u8 enabled_slices = 0;
80 	enum dbuf_slice slice;
81 
82 	for_each_dbuf_slice(display, slice) {
83 		if (intel_de_read(display, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
84 			enabled_slices |= BIT(slice);
85 	}
86 
87 	return enabled_slices;
88 }
89 
90 /*
91  * FIXME: We still don't have the proper code to detect if we need to apply the WA,
92  * so assume we'll always need it in order to avoid underruns.
93  */
94 static bool skl_needs_memory_bw_wa(struct intel_display *display)
95 {
96 	return DISPLAY_VER(display) == 9;
97 }
98 
99 bool
100 intel_has_sagv(struct intel_display *display)
101 {
102 	return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
103 }
104 
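/*
 * SAGV block time in microseconds: read from the MTL_LATENCY_SAGV register
 * on display 14+, from pcode on display 12/13, or hardcoded per platform
 * (10 us for display 11, 30 us for older SAGV-capable parts). 0 means
 * unknown or unsupported.
 */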
105 static u32
106 intel_sagv_block_time(struct intel_display *display)
107 {
108 	if (DISPLAY_VER(display) >= 14) {
109 		u32 val;
110 
111 		val = intel_de_read(display, MTL_LATENCY_SAGV);
112 
113 		return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
114 	} else if (DISPLAY_VER(display) >= 12) {
115 		u32 val = 0;
116 		int ret;
117 
118 		ret = intel_pcode_read(display->drm,
119 				       GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
120 				       &val, NULL);
121 		if (ret) {
122 			drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
123 			return 0;
124 		}
125 
126 		return val;
127 	} else if (DISPLAY_VER(display) == 11) {
128 		return 10;
129 	} else if (HAS_SAGV(display)) {
130 		return 30;
131 	} else {
132 		return 0;
133 	}
134 }
135 
136 static void intel_sagv_init(struct intel_display *display)
137 {
138 	if (!HAS_SAGV(display))
139 		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
140 
141 	/*
142 	 * Probe to see if we have working SAGV control.
143 	 * For icl+ this was already determined by intel_bw_init_hw().
144 	 */
145 	if (DISPLAY_VER(display) < 11)
146 		skl_sagv_disable(display);
147 
148 	drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
149 
150 	display->sagv.block_time_us = intel_sagv_block_time(display);
151 
152 	drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
153 		    str_yes_no(intel_has_sagv(display)), display->sagv.block_time_us);
154 
155 	/* avoid overflow when adding the block time to the wm0 latency/etc. */
156 	if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
157 		     "Excessive SAGV block time %u, ignoring\n",
158 		     display->sagv.block_time_us))
159 		display->sagv.block_time_us = 0;
160 
161 	if (!intel_has_sagv(display))
162 		display->sagv.block_time_us = 0;
163 }
164 
165 /*
166  * SAGV dynamically adjusts the system agent voltage and clock frequencies
167  * depending on power and performance requirements. The display engine access
168  * to system memory is blocked during the adjustment time. Because of the
169  * blocking time, having this enabled can cause full system hangs and/or pipe
170  * underruns if we don't meet all of the following requirements:
171  *
172  *  - <= 1 pipe enabled
173  *  - All planes can enable watermarks for latencies >= SAGV engine block time
174  *  - We're not using an interlaced display configuration
175  */
176 static void skl_sagv_enable(struct intel_display *display)
177 {
178 	int ret;
179 
180 	if (!intel_has_sagv(display))
181 		return;
182 
183 	if (display->sagv.status == I915_SAGV_ENABLED)
184 		return;
185 
186 	drm_dbg_kms(display->drm, "Enabling SAGV\n");
187 	ret = intel_pcode_write(display->drm, GEN9_PCODE_SAGV_CONTROL,
188 				GEN9_SAGV_ENABLE);
189 
190 	/* We don't need to wait for SAGV when enabling */
191 
192 	/*
193 	 * Some skl systems, pre-release machines in particular,
194 	 * don't actually have SAGV.
195 	 */
196 	if (display->platform.skylake && ret == -ENXIO) {
197 		drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
198 		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
199 		return;
200 	} else if (ret < 0) {
201 		drm_err(display->drm, "Failed to enable SAGV\n");
202 		return;
203 	}
204 
205 	display->sagv.status = I915_SAGV_ENABLED;
206 }
207 
208 static void skl_sagv_disable(struct intel_display *display)
209 {
210 	int ret;
211 
212 	if (!intel_has_sagv(display))
213 		return;
214 
215 	if (display->sagv.status == I915_SAGV_DISABLED)
216 		return;
217 
218 	drm_dbg_kms(display->drm, "Disabling SAGV\n");
219 	/* bspec says to keep retrying for at least 1 ms */
220 	ret = intel_pcode_request(display->drm, GEN9_PCODE_SAGV_CONTROL,
221 				  GEN9_SAGV_DISABLE,
222 				  GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1);
223 	/*
224 	 * Some skl systems, pre-release machines in particular,
225 	 * don't actually have SAGV.
226 	 */
227 	if (display->platform.skylake && ret == -ENXIO) {
228 		drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
229 		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
230 		return;
231 	} else if (ret < 0) {
232 		drm_err(display->drm, "Failed to disable SAGV (%d)\n", ret);
233 		return;
234 	}
235 
236 	display->sagv.status = I915_SAGV_DISABLED;
237 }
238 
239 static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
240 {
241 	struct intel_display *display = to_intel_display(state);
242 	const struct intel_bw_state *new_bw_state =
243 		intel_atomic_get_new_bw_state(state);
244 
245 	if (!new_bw_state)
246 		return;
247 
248 	if (!intel_bw_can_enable_sagv(display, new_bw_state))
249 		skl_sagv_disable(display);
250 }
251 
252 static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
253 {
254 	struct intel_display *display = to_intel_display(state);
255 	const struct intel_bw_state *new_bw_state =
256 		intel_atomic_get_new_bw_state(state);
257 
258 	if (!new_bw_state)
259 		return;
260 
261 	if (intel_bw_can_enable_sagv(display, new_bw_state))
262 		skl_sagv_enable(display);
263 }
264 
265 void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
266 {
267 	struct intel_display *display = to_intel_display(state);
268 
269 	/*
270 	 * Just return if we can't control SAGV or don't have it.
271 	 * This is different from the situation where we have SAGV but just can't
272 	 * afford it due to DBuf limitations - if SAGV is completely
273 	 * disabled in the BIOS, we are not even allowed to send a PCode request,
274 	 * as it will throw an error. So we have to check it here.
275 	 */
276 	if (!intel_has_sagv(display))
277 		return;
278 
279 	if (DISPLAY_VER(display) >= 11)
280 		icl_sagv_pre_plane_update(state);
281 	else
282 		skl_sagv_pre_plane_update(state);
283 }
284 
285 void intel_sagv_post_plane_update(struct intel_atomic_state *state)
286 {
287 	struct intel_display *display = to_intel_display(state);
288 
289 	/*
290 	 * Just return if we can't control SAGV or don't have it.
291 	 * This is different from the situation where we have SAGV but just can't
292 	 * afford it due to DBuf limitations - if SAGV is completely
293 	 * disabled in the BIOS, we are not even allowed to send a PCode request,
294 	 * as it will throw an error. So we have to check it here.
295 	 */
296 	if (!intel_has_sagv(display))
297 		return;
298 
299 	if (DISPLAY_VER(display) >= 11)
300 		icl_sagv_post_plane_update(state);
301 	else
302 		skl_sagv_post_plane_update(state);
303 }
304 
305 static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
306 {
307 	struct intel_display *display = to_intel_display(crtc_state);
308 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
309 	enum plane_id plane_id;
310 	int max_level = INT_MAX;
311 
312 	if (!intel_has_sagv(display))
313 		return false;
314 
315 	if (!crtc_state->hw.active)
316 		return true;
317 
318 	if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
319 		return false;
320 
321 	for_each_plane_id_on_crtc(crtc, plane_id) {
322 		const struct skl_plane_wm *wm =
323 			&crtc_state->wm.skl.optimal.planes[plane_id];
324 		int level;
325 
326 		/* Skip this plane if it's not enabled */
327 		if (!wm->wm[0].enable)
328 			continue;
329 
330 		/* Find the highest enabled wm level for this plane */
331 		for (level = display->wm.num_levels - 1;
332 		     !wm->wm[level].enable; --level)
333 		     { }
334 
335 		/* Highest common enabled wm level for all planes */
336 		max_level = min(level, max_level);
337 	}
338 
339 	/* No enabled planes? */
340 	if (max_level == INT_MAX)
341 		return true;
342 
343 	for_each_plane_id_on_crtc(crtc, plane_id) {
344 		const struct skl_plane_wm *wm =
345 			&crtc_state->wm.skl.optimal.planes[plane_id];
346 
347 		/*
348 		 * All enabled planes must have a common enabled wm level that
349 		 * can tolerate memory latencies higher than sagv_block_time_us.
350 		 */
351 		if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
352 			return false;
353 	}
354 
355 	return true;
356 }
357 
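/*
 * TGL+ has dedicated SAGV watermarks, so instead of hunting for a common
 * wm level that tolerates the SAGV block time, just require that every
 * enabled plane also has its SAGV wm0 enabled.
 */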
358 static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
359 {
360 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
361 	enum plane_id plane_id;
362 
363 	if (!crtc_state->hw.active)
364 		return true;
365 
366 	for_each_plane_id_on_crtc(crtc, plane_id) {
367 		const struct skl_plane_wm *wm =
368 			&crtc_state->wm.skl.optimal.planes[plane_id];
369 
370 		if (wm->wm[0].enable && !wm->sagv.wm0.enable)
371 			return false;
372 	}
373 
374 	return true;
375 }
376 
377 bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
378 {
379 	struct intel_display *display = to_intel_display(crtc_state);
380 
381 	if (!display->params.enable_sagv)
382 		return false;
383 
384 	/*
385 	 * SAGV is initially forced off because its current
386 	 * state can't be queried from pcode. Allow SAGV to
387 	 * be enabled upon the first real commit.
388 	 */
389 	if (crtc_state->inherited)
390 		return false;
391 
392 	if (DISPLAY_VER(display) >= 12)
393 		return tgl_crtc_can_enable_sagv(crtc_state);
394 	else
395 		return skl_crtc_can_enable_sagv(crtc_state);
396 }
397 
398 static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
399 			      u16 start, u16 end)
400 {
401 	entry->start = start;
402 	entry->end = end;
403 
404 	return end;
405 }
406 
407 static int intel_dbuf_slice_size(struct intel_display *display)
408 {
409 	return DISPLAY_INFO(display)->dbuf.size /
410 		hweight8(DISPLAY_INFO(display)->dbuf.slice_mask);
411 }
412 
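/*
 * Convert a contiguous mask of DBuf slices into the corresponding DDB block
 * range. For example (illustrative slice size only): with four slices of
 * 512 blocks each, slice_mask = BIT(DBUF_S2) | BIT(DBUF_S3) yields
 * start = 1 * 512 = 512 and end = 3 * 512 = 1536.
 */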
413 static void
414 skl_ddb_entry_for_slices(struct intel_display *display, u8 slice_mask,
415 			 struct skl_ddb_entry *ddb)
416 {
417 	int slice_size = intel_dbuf_slice_size(display);
418 
419 	if (!slice_mask) {
420 		ddb->start = 0;
421 		ddb->end = 0;
422 		return;
423 	}
424 
425 	ddb->start = (ffs(slice_mask) - 1) * slice_size;
426 	ddb->end = fls(slice_mask) * slice_size;
427 
428 	WARN_ON(ddb->start >= ddb->end);
429 	WARN_ON(ddb->end > DISPLAY_INFO(display)->dbuf.size);
430 }
431 
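/*
 * Return the DDB offset of the first slice in the slice group (S1/S2 vs
 * S3/S4) that the given slices belong to. Used to translate between
 * MBUS-relative and absolute DDB offsets.
 */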
432 static unsigned int mbus_ddb_offset(struct intel_display *display, u8 slice_mask)
433 {
434 	struct skl_ddb_entry ddb;
435 
436 	if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
437 		slice_mask = BIT(DBUF_S1);
438 	else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
439 		slice_mask = BIT(DBUF_S3);
440 
441 	skl_ddb_entry_for_slices(display, slice_mask, &ddb);
442 
443 	return ddb.start;
444 }
445 
446 u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
447 			    const struct skl_ddb_entry *entry)
448 {
449 	int slice_size = intel_dbuf_slice_size(display);
450 	enum dbuf_slice start_slice, end_slice;
451 	u8 slice_mask = 0;
452 
453 	if (!skl_ddb_entry_size(entry))
454 		return 0;
455 
456 	start_slice = entry->start / slice_size;
457 	end_slice = (entry->end - 1) / slice_size;
458 
459 	/*
460 	 * A per-plane DDB entry can in the worst case span multiple slices,
461 	 * but a single entry is always contiguous.
462 	 */
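	/*
	 * For example (illustrative slice size only): with 512-block slices,
	 * an entry spanning blocks 300-700 covers slices 0 (300/512) through
	 * 1 ((700 - 1)/512), so the returned mask is BIT(0) | BIT(1).
	 */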
463 	while (start_slice <= end_slice) {
464 		slice_mask |= BIT(start_slice);
465 		start_slice++;
466 	}
467 
468 	return slice_mask;
469 }
470 
471 static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
472 {
473 	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
474 	int hdisplay, vdisplay;
475 
476 	if (!crtc_state->hw.active)
477 		return 0;
478 
479 	/*
480 	 * The watermark/DDB requirement depends heavily on the width of the
481 	 * framebuffer, so instead of allocating DDB equally among pipes,
482 	 * distribute it based on the resolution/width of the display.
483 	 */
484 	drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
485 
486 	return hdisplay;
487 }
488 
489 static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
490 				    enum pipe for_pipe,
491 				    unsigned int *weight_start,
492 				    unsigned int *weight_end,
493 				    unsigned int *weight_total)
494 {
495 	struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
496 	enum pipe pipe;
497 
498 	*weight_start = 0;
499 	*weight_end = 0;
500 	*weight_total = 0;
501 
502 	for_each_pipe(display, pipe) {
503 		int weight = dbuf_state->weight[pipe];
504 
505 		/*
506 		 * Do not account for pipes using other slice sets.
507 		 * Luckily, as of the current BSpec, slice sets do not partially
508 		 * intersect (pipes share either the same single slice or the same
509 		 * slice set, i.e. no partial intersection), so it is enough to
510 		 * check for equality for now.
511 		 */
512 		if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
513 			continue;
514 
515 		*weight_total += weight;
516 		if (pipe < for_pipe) {
517 			*weight_start += weight;
518 			*weight_end += weight;
519 		} else if (pipe == for_pipe) {
520 			*weight_end += weight;
521 		}
522 	}
523 }
524 
525 static int
526 skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
527 {
528 	struct intel_display *display = to_intel_display(crtc);
529 	unsigned int weight_total, weight_start, weight_end;
530 	const struct intel_dbuf_state *old_dbuf_state =
531 		intel_atomic_get_old_dbuf_state(state);
532 	struct intel_dbuf_state *new_dbuf_state =
533 		intel_atomic_get_new_dbuf_state(state);
534 	struct intel_crtc_state *crtc_state;
535 	struct skl_ddb_entry ddb_slices;
536 	enum pipe pipe = crtc->pipe;
537 	unsigned int mbus_offset = 0;
538 	u32 ddb_range_size;
539 	u32 dbuf_slice_mask;
540 	u32 start, end;
541 	int ret;
542 
543 	if (new_dbuf_state->weight[pipe] == 0) {
544 		skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
545 		goto out;
546 	}
547 
548 	dbuf_slice_mask = new_dbuf_state->slices[pipe];
549 
550 	skl_ddb_entry_for_slices(display, dbuf_slice_mask, &ddb_slices);
551 	mbus_offset = mbus_ddb_offset(display, dbuf_slice_mask);
552 	ddb_range_size = skl_ddb_entry_size(&ddb_slices);
553 
554 	intel_crtc_dbuf_weights(new_dbuf_state, pipe,
555 				&weight_start, &weight_end, &weight_total);
556 
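	/*
	 * Carve this pipe's share out of the slice range proportionally to the
	 * weights. Illustrative example: pipes A and B share the slice set with
	 * weights 1920 and 2560; for pipe B weight_start = 1920 and
	 * weight_end = weight_total = 4480, so B gets the upper 2560/4480 of
	 * the range.
	 */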
557 	start = ddb_range_size * weight_start / weight_total;
558 	end = ddb_range_size * weight_end / weight_total;
559 
560 	skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
561 			   ddb_slices.start - mbus_offset + start,
562 			   ddb_slices.start - mbus_offset + end);
563 
564 out:
565 	if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
566 	    skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
567 				&new_dbuf_state->ddb[pipe]))
568 		return 0;
569 
570 	ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
571 	if (ret)
572 		return ret;
573 
574 	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
575 	if (IS_ERR(crtc_state))
576 		return PTR_ERR(crtc_state);
577 
578 	/*
579 	 * Used for checking overlaps, so we need absolute
580 	 * offsets instead of MBUS relative offsets.
581 	 */
582 	crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
583 	crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
584 
585 	drm_dbg_kms(display->drm,
586 		    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
587 		    crtc->base.base.id, crtc->base.name,
588 		    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
589 		    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
590 		    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
591 		    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
592 
593 	return 0;
594 }
595 
596 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
597 				 int width, const struct drm_format_info *format,
598 				 u64 modifier, unsigned int rotation,
599 				 u32 plane_pixel_rate, struct skl_wm_params *wp,
600 				 int color_plane, unsigned int pan_x);
601 
602 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
603 				 struct intel_plane *plane,
604 				 int level,
605 				 unsigned int latency,
606 				 const struct skl_wm_params *wp,
607 				 const struct skl_wm_level *result_prev,
608 				 struct skl_wm_level *result /* out */);
609 
610 static unsigned int skl_wm_latency(struct intel_display *display, int level,
611 				   const struct skl_wm_params *wp)
612 {
613 	unsigned int latency = display->wm.skl_latency[level];
614 
615 	if (latency == 0)
616 		return 0;
617 
618 	/*
619 	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
620 	 * Display WA #1141: kbl,cfl
621 	 */
622 	if ((display->platform.kabylake || display->platform.coffeelake ||
623 	     display->platform.cometlake) && skl_watermark_ipc_enabled(display))
624 		latency += 4;
625 
626 	if (skl_needs_memory_bw_wa(display) && wp && wp->x_tiled)
627 		latency += 15;
628 
629 	return latency;
630 }
631 
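/*
 * Compute the fixed DDB allocation for the cursor, sized for a worst-case
 * 256 pixel wide linear ARGB8888 cursor at the pipe's pixel rate, and never
 * less than 32 blocks with a single active pipe (8 otherwise).
 */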
632 static unsigned int
633 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
634 		      int num_active)
635 {
636 	struct intel_display *display = to_intel_display(crtc_state);
637 	struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
638 	struct skl_wm_level wm = {};
639 	int ret, min_ddb_alloc = 0;
640 	struct skl_wm_params wp;
641 	int level;
642 
643 	ret = skl_compute_wm_params(crtc_state, 256,
644 				    drm_format_info(DRM_FORMAT_ARGB8888),
645 				    DRM_FORMAT_MOD_LINEAR,
646 				    DRM_MODE_ROTATE_0,
647 				    crtc_state->pixel_rate, &wp, 0, 0);
648 	drm_WARN_ON(display->drm, ret);
649 
650 	for (level = 0; level < display->wm.num_levels; level++) {
651 		unsigned int latency = skl_wm_latency(display, level, &wp);
652 
653 		skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
654 		if (wm.min_ddb_alloc == U16_MAX)
655 			break;
656 
657 		min_ddb_alloc = wm.min_ddb_alloc;
658 	}
659 
660 	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
661 }
662 
663 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
664 {
665 	skl_ddb_entry_init(entry,
666 			   REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
667 			   REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
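	/* The register end field is inclusive; convert to the exclusive end used by skl_ddb_entry. */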
668 	if (entry->end)
669 		entry->end++;
670 }
671 
672 static void
673 skl_ddb_get_hw_plane_state(struct intel_display *display,
674 			   const enum pipe pipe,
675 			   const enum plane_id plane_id,
676 			   struct skl_ddb_entry *ddb,
677 			   struct skl_ddb_entry *ddb_y,
678 			   u16 *min_ddb, u16 *interim_ddb)
679 {
680 	u32 val;
681 
682 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
683 	if (plane_id == PLANE_CURSOR) {
684 		val = intel_de_read(display, CUR_BUF_CFG(pipe));
685 		skl_ddb_entry_init_from_hw(ddb, val);
686 		return;
687 	}
688 
689 	val = intel_de_read(display, PLANE_BUF_CFG(pipe, plane_id));
690 	skl_ddb_entry_init_from_hw(ddb, val);
691 
692 	if (DISPLAY_VER(display) >= 30) {
693 		val = intel_de_read(display, PLANE_MIN_BUF_CFG(pipe, plane_id));
694 
695 		*min_ddb = REG_FIELD_GET(PLANE_MIN_DBUF_BLOCKS_MASK, val);
696 		*interim_ddb = REG_FIELD_GET(PLANE_INTERIM_DBUF_BLOCKS_MASK, val);
697 	}
698 
699 	if (DISPLAY_VER(display) >= 11)
700 		return;
701 
702 	val = intel_de_read(display, PLANE_NV12_BUF_CFG(pipe, plane_id));
703 	skl_ddb_entry_init_from_hw(ddb_y, val);
704 }
705 
706 static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
707 				      struct skl_ddb_entry *ddb,
708 				      struct skl_ddb_entry *ddb_y,
709 				      u16 *min_ddb, u16 *interim_ddb)
710 {
711 	struct intel_display *display = to_intel_display(crtc);
712 	enum intel_display_power_domain power_domain;
713 	enum pipe pipe = crtc->pipe;
714 	intel_wakeref_t wakeref;
715 	enum plane_id plane_id;
716 
717 	power_domain = POWER_DOMAIN_PIPE(pipe);
718 	wakeref = intel_display_power_get_if_enabled(display, power_domain);
719 	if (!wakeref)
720 		return;
721 
722 	for_each_plane_id_on_crtc(crtc, plane_id)
723 		skl_ddb_get_hw_plane_state(display, pipe,
724 					   plane_id,
725 					   &ddb[plane_id],
726 					   &ddb_y[plane_id],
727 					   &min_ddb[plane_id],
728 					   &interim_ddb[plane_id]);
729 
730 	intel_display_power_put(display, power_domain, wakeref);
731 }
732 
733 struct dbuf_slice_conf_entry {
734 	u8 active_pipes;
735 	u8 dbuf_mask[I915_MAX_PIPES];
736 	bool join_mbus;
737 };
738 
739 /*
740  * Table taken from Bspec 12716
741  * Pipes do have some preferred DBuf slice affinity,
742  * plus there are some hardcoded requirements on how
743  * those should be distributed for multipipe scenarios.
744  * With more DBuf slices the algorithm can get even more messy
745  * and less readable, so it was decided to use a table almost
746  * as-is from the BSpec itself - that way it is at least easier
747  * to compare, change and check.
748  */
749 static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
750 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
751 {
752 	{
753 		.active_pipes = BIT(PIPE_A),
754 		.dbuf_mask = {
755 			[PIPE_A] = BIT(DBUF_S1),
756 		},
757 	},
758 	{
759 		.active_pipes = BIT(PIPE_B),
760 		.dbuf_mask = {
761 			[PIPE_B] = BIT(DBUF_S1),
762 		},
763 	},
764 	{
765 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
766 		.dbuf_mask = {
767 			[PIPE_A] = BIT(DBUF_S1),
768 			[PIPE_B] = BIT(DBUF_S2),
769 		},
770 	},
771 	{
772 		.active_pipes = BIT(PIPE_C),
773 		.dbuf_mask = {
774 			[PIPE_C] = BIT(DBUF_S2),
775 		},
776 	},
777 	{
778 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
779 		.dbuf_mask = {
780 			[PIPE_A] = BIT(DBUF_S1),
781 			[PIPE_C] = BIT(DBUF_S2),
782 		},
783 	},
784 	{
785 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
786 		.dbuf_mask = {
787 			[PIPE_B] = BIT(DBUF_S1),
788 			[PIPE_C] = BIT(DBUF_S2),
789 		},
790 	},
791 	{
792 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
793 		.dbuf_mask = {
794 			[PIPE_A] = BIT(DBUF_S1),
795 			[PIPE_B] = BIT(DBUF_S1),
796 			[PIPE_C] = BIT(DBUF_S2),
797 		},
798 	},
799 	{}
800 };
801 
802 /*
803  * Table taken from Bspec 49255
804  * Pipes do have some preferred DBuf slice affinity,
805  * plus there are some hardcoded requirements on how
806  * those should be distributed for multipipe scenarios.
807  * With more DBuf slices the algorithm can get even more messy
808  * and less readable, so it was decided to use a table almost
809  * as-is from the BSpec itself - that way it is at least easier
810  * to compare, change and check.
811  */
812 static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
813 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
814 {
815 	{
816 		.active_pipes = BIT(PIPE_A),
817 		.dbuf_mask = {
818 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
819 		},
820 	},
821 	{
822 		.active_pipes = BIT(PIPE_B),
823 		.dbuf_mask = {
824 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
825 		},
826 	},
827 	{
828 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
829 		.dbuf_mask = {
830 			[PIPE_A] = BIT(DBUF_S2),
831 			[PIPE_B] = BIT(DBUF_S1),
832 		},
833 	},
834 	{
835 		.active_pipes = BIT(PIPE_C),
836 		.dbuf_mask = {
837 			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
838 		},
839 	},
840 	{
841 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
842 		.dbuf_mask = {
843 			[PIPE_A] = BIT(DBUF_S1),
844 			[PIPE_C] = BIT(DBUF_S2),
845 		},
846 	},
847 	{
848 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
849 		.dbuf_mask = {
850 			[PIPE_B] = BIT(DBUF_S1),
851 			[PIPE_C] = BIT(DBUF_S2),
852 		},
853 	},
854 	{
855 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
856 		.dbuf_mask = {
857 			[PIPE_A] = BIT(DBUF_S1),
858 			[PIPE_B] = BIT(DBUF_S1),
859 			[PIPE_C] = BIT(DBUF_S2),
860 		},
861 	},
862 	{
863 		.active_pipes = BIT(PIPE_D),
864 		.dbuf_mask = {
865 			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
866 		},
867 	},
868 	{
869 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
870 		.dbuf_mask = {
871 			[PIPE_A] = BIT(DBUF_S1),
872 			[PIPE_D] = BIT(DBUF_S2),
873 		},
874 	},
875 	{
876 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
877 		.dbuf_mask = {
878 			[PIPE_B] = BIT(DBUF_S1),
879 			[PIPE_D] = BIT(DBUF_S2),
880 		},
881 	},
882 	{
883 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
884 		.dbuf_mask = {
885 			[PIPE_A] = BIT(DBUF_S1),
886 			[PIPE_B] = BIT(DBUF_S1),
887 			[PIPE_D] = BIT(DBUF_S2),
888 		},
889 	},
890 	{
891 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
892 		.dbuf_mask = {
893 			[PIPE_C] = BIT(DBUF_S1),
894 			[PIPE_D] = BIT(DBUF_S2),
895 		},
896 	},
897 	{
898 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
899 		.dbuf_mask = {
900 			[PIPE_A] = BIT(DBUF_S1),
901 			[PIPE_C] = BIT(DBUF_S2),
902 			[PIPE_D] = BIT(DBUF_S2),
903 		},
904 	},
905 	{
906 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
907 		.dbuf_mask = {
908 			[PIPE_B] = BIT(DBUF_S1),
909 			[PIPE_C] = BIT(DBUF_S2),
910 			[PIPE_D] = BIT(DBUF_S2),
911 		},
912 	},
913 	{
914 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
915 		.dbuf_mask = {
916 			[PIPE_A] = BIT(DBUF_S1),
917 			[PIPE_B] = BIT(DBUF_S1),
918 			[PIPE_C] = BIT(DBUF_S2),
919 			[PIPE_D] = BIT(DBUF_S2),
920 		},
921 	},
922 	{}
923 };
924 
925 static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
926 	{
927 		.active_pipes = BIT(PIPE_A),
928 		.dbuf_mask = {
929 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
930 		},
931 	},
932 	{
933 		.active_pipes = BIT(PIPE_B),
934 		.dbuf_mask = {
935 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
936 		},
937 	},
938 	{
939 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
940 		.dbuf_mask = {
941 			[PIPE_A] = BIT(DBUF_S1),
942 			[PIPE_B] = BIT(DBUF_S2),
943 		},
944 	},
945 	{
946 		.active_pipes = BIT(PIPE_C),
947 		.dbuf_mask = {
948 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
949 		},
950 	},
951 	{
952 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
953 		.dbuf_mask = {
954 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
955 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
956 		},
957 	},
958 	{
959 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
960 		.dbuf_mask = {
961 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
962 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
963 		},
964 	},
965 	{
966 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
967 		.dbuf_mask = {
968 			[PIPE_A] = BIT(DBUF_S1),
969 			[PIPE_B] = BIT(DBUF_S2),
970 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
971 		},
972 	},
973 	{
974 		.active_pipes = BIT(PIPE_D),
975 		.dbuf_mask = {
976 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
977 		},
978 	},
979 	{
980 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
981 		.dbuf_mask = {
982 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
983 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
984 		},
985 	},
986 	{
987 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
988 		.dbuf_mask = {
989 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
990 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
991 		},
992 	},
993 	{
994 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
995 		.dbuf_mask = {
996 			[PIPE_A] = BIT(DBUF_S1),
997 			[PIPE_B] = BIT(DBUF_S2),
998 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
999 		},
1000 	},
1001 	{
1002 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1003 		.dbuf_mask = {
1004 			[PIPE_C] = BIT(DBUF_S3),
1005 			[PIPE_D] = BIT(DBUF_S4),
1006 		},
1007 	},
1008 	{
1009 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1010 		.dbuf_mask = {
1011 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1012 			[PIPE_C] = BIT(DBUF_S3),
1013 			[PIPE_D] = BIT(DBUF_S4),
1014 		},
1015 	},
1016 	{
1017 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1018 		.dbuf_mask = {
1019 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1020 			[PIPE_C] = BIT(DBUF_S3),
1021 			[PIPE_D] = BIT(DBUF_S4),
1022 		},
1023 	},
1024 	{
1025 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1026 		.dbuf_mask = {
1027 			[PIPE_A] = BIT(DBUF_S1),
1028 			[PIPE_B] = BIT(DBUF_S2),
1029 			[PIPE_C] = BIT(DBUF_S3),
1030 			[PIPE_D] = BIT(DBUF_S4),
1031 		},
1032 	},
1033 	{}
1034 };
1035 
1036 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
1037 	/*
1038 	 * Keep the join_mbus cases first so check_mbus_joined()
1039 	 * will prefer them over the !join_mbus cases.
1040 	 */
1041 	{
1042 		.active_pipes = BIT(PIPE_A),
1043 		.dbuf_mask = {
1044 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1045 		},
1046 		.join_mbus = true,
1047 	},
1048 	{
1049 		.active_pipes = BIT(PIPE_B),
1050 		.dbuf_mask = {
1051 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1052 		},
1053 		.join_mbus = true,
1054 	},
1055 	{
1056 		.active_pipes = BIT(PIPE_A),
1057 		.dbuf_mask = {
1058 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1059 		},
1060 		.join_mbus = false,
1061 	},
1062 	{
1063 		.active_pipes = BIT(PIPE_B),
1064 		.dbuf_mask = {
1065 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1066 		},
1067 		.join_mbus = false,
1068 	},
1069 	{
1070 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
1071 		.dbuf_mask = {
1072 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1073 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1074 		},
1075 	},
1076 	{
1077 		.active_pipes = BIT(PIPE_C),
1078 		.dbuf_mask = {
1079 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1080 		},
1081 	},
1082 	{
1083 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
1084 		.dbuf_mask = {
1085 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1086 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1087 		},
1088 	},
1089 	{
1090 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
1091 		.dbuf_mask = {
1092 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1093 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1094 		},
1095 	},
1096 	{
1097 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
1098 		.dbuf_mask = {
1099 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1100 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1101 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1102 		},
1103 	},
1104 	{
1105 		.active_pipes = BIT(PIPE_D),
1106 		.dbuf_mask = {
1107 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1108 		},
1109 	},
1110 	{
1111 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
1112 		.dbuf_mask = {
1113 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1114 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1115 		},
1116 	},
1117 	{
1118 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
1119 		.dbuf_mask = {
1120 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1121 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1122 		},
1123 	},
1124 	{
1125 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
1126 		.dbuf_mask = {
1127 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1128 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1129 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1130 		},
1131 	},
1132 	{
1133 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1134 		.dbuf_mask = {
1135 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1136 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1137 		},
1138 	},
1139 	{
1140 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1141 		.dbuf_mask = {
1142 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1143 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1144 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1145 		},
1146 	},
1147 	{
1148 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1149 		.dbuf_mask = {
1150 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1151 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1152 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1153 		},
1154 	},
1155 	{
1156 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1157 		.dbuf_mask = {
1158 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1159 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1160 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1161 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1162 		},
1163 	},
1164 	{}
1165 
1166 };
1167 
1168 static bool check_mbus_joined(u8 active_pipes,
1169 			      const struct dbuf_slice_conf_entry *dbuf_slices)
1170 {
1171 	int i;
1172 
1173 	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1174 		if (dbuf_slices[i].active_pipes == active_pipes)
1175 			return dbuf_slices[i].join_mbus;
1176 	}
1177 	return false;
1178 }
1179 
1180 static bool adlp_check_mbus_joined(u8 active_pipes)
1181 {
1182 	return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
1183 }
1184 
1185 static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
1186 			      const struct dbuf_slice_conf_entry *dbuf_slices)
1187 {
1188 	int i;
1189 
1190 	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1191 		if (dbuf_slices[i].active_pipes == active_pipes &&
1192 		    dbuf_slices[i].join_mbus == join_mbus)
1193 			return dbuf_slices[i].dbuf_mask[pipe];
1194 	}
1195 	return 0;
1196 }
1197 
1198 /*
1199  * This function finds an entry with the same enabled pipe configuration and
1200  * returns the corresponding DBuf slice mask as stated in the BSpec for the
1201  * particular platform.
1202  */
1203 static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1204 {
1205 	/*
1206 	 * FIXME: For ICL this is still a bit unclear as a previous BSpec revision
1207 	 * required calculating a "pipe ratio" in order to determine
1208 	 * if one or two slices can be used for single pipe configurations,
1209 	 * as an additional constraint to the existing table.
1210 	 * However, based on more recent info, it should not be the "pipe ratio"
1211 	 * but rather the ratio between pixel_rate and cdclk with additional
1212 	 * constants, so for now we are using only the table until this is
1213 	 * clarified. This is also why a crtc_state parameter may be needed
1214 	 * here again - we will need it once those additional constraints
1215 	 * pop up.
1216 	 */
1217 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1218 				   icl_allowed_dbufs);
1219 }
1220 
1221 static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1222 {
1223 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1224 				   tgl_allowed_dbufs);
1225 }
1226 
1227 static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1228 {
1229 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1230 				   adlp_allowed_dbufs);
1231 }
1232 
1233 static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1234 {
1235 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1236 				   dg2_allowed_dbufs);
1237 }
1238 
1239 static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
1240 {
1241 	struct intel_display *display = to_intel_display(crtc);
1242 	enum pipe pipe = crtc->pipe;
1243 
1244 	if (display->platform.dg2)
1245 		return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1246 	else if (DISPLAY_VER(display) >= 13)
1247 		return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1248 	else if (DISPLAY_VER(display) == 12)
1249 		return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1250 	else if (DISPLAY_VER(display) == 11)
1251 		return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1252 	/*
1253 	 * For anything else just return one slice for now.
1254 	 * Should be extended for other platforms.
1255 	 */
1256 	return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
1257 }
1258 
1259 static bool
1260 use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
1261 		     struct intel_plane *plane)
1262 {
1263 	struct intel_display *display = to_intel_display(plane);
1264 
1265 	/* Xe3+ is auto minimum DDB capable, so don't force minimal wm0 */
1266 	return IS_DISPLAY_VER(display, 13, 20) &&
1267 	       crtc_state->uapi.async_flip &&
1268 	       plane->async_flip;
1269 }
1270 
1271 unsigned int
1272 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
1273 			     struct intel_plane *plane, int width, int height,
1274 			     int cpp)
1275 {
1276 	/*
1277 	 * We calculate extra ddb based on the ratio of plane data rate to total
1278 	 * data rate. In some cases we should not allocate extra ddb for the
1279 	 * plane, so do not count its data rate in that case.
1280 	 */
1281 	if (use_minimal_wm0_only(crtc_state, plane))
1282 		return 0;
1283 
1284 	return width * height * cpp;
1285 }
1286 
1287 static u64
1288 skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
1289 {
1290 	struct intel_display *display = to_intel_display(crtc_state);
1291 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1292 	enum plane_id plane_id;
1293 	u64 data_rate = 0;
1294 
1295 	for_each_plane_id_on_crtc(crtc, plane_id) {
1296 		if (plane_id == PLANE_CURSOR)
1297 			continue;
1298 
1299 		data_rate += crtc_state->rel_data_rate[plane_id];
1300 
1301 		if (DISPLAY_VER(display) < 11)
1302 			data_rate += crtc_state->rel_data_rate_y[plane_id];
1303 	}
1304 
1305 	return data_rate;
1306 }
1307 
1308 const struct skl_wm_level *
1309 skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
1310 		   enum plane_id plane_id,
1311 		   int level)
1312 {
1313 	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1314 
1315 	if (level == 0 && pipe_wm->use_sagv_wm)
1316 		return &wm->sagv.wm0;
1317 
1318 	return &wm->wm[level];
1319 }
1320 
1321 const struct skl_wm_level *
1322 skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
1323 		   enum plane_id plane_id)
1324 {
1325 	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1326 
1327 	if (pipe_wm->use_sagv_wm)
1328 		return &wm->sagv.trans_wm;
1329 
1330 	return &wm->trans_wm;
1331 }
1332 
1333 /*
1334  * We only disable the watermarks for each plane if
1335  * they exceed the ddb allocation of said plane. This
1336  * is done so that we don't end up touching cursor
1337  * watermarks needlessly when some other plane reduces
1338  * our max possible watermark level.
1339  *
1340  * Bspec has this to say about the PLANE_WM enable bit:
1341  * "All the watermarks at this level for all enabled
1342  *  planes must be enabled before the level will be used."
1343  * So this is actually safe to do.
1344  */
1345 static void
1346 skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
1347 {
1348 	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
1349 		memset(wm, 0, sizeof(*wm));
1350 }
1351 
1352 static void
1353 skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
1354 			const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
1355 {
1356 	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
1357 	    uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1358 		memset(wm, 0, sizeof(*wm));
1359 		memset(uv_wm, 0, sizeof(*uv_wm));
1360 	}
1361 }
1362 
1363 static bool skl_need_wm_copy_wa(struct intel_display *display, int level,
1364 				const struct skl_plane_wm *wm)
1365 {
1366 	/*
1367 	 * Wa_1408961008:icl, ehl
1368 	 * Wa_14012656716:tgl, adl
1369 	 * Wa_14017887344:icl
1370 	 * Wa_14017868169:adl, tgl
1371 	 * Due to some power saving optimizations, different subsystems
1372 	 * like PSR might still use even disabled wm level registers
1373 	 * for "reference", so let's keep at least the values sane.
1374 	 * Considering the number of WAs requiring us to do similar things, it
1375 	 * was decided to simply do it for all of the platforms; as those wm
1376 	 * levels are disabled, this isn't going to do any harm anyway.
1377 	 */
1378 	return level > 0 && !wm->wm[level].enable;
1379 }
1380 
1381 struct skl_plane_ddb_iter {
1382 	u64 data_rate;
1383 	u16 start, size;
1384 };
1385 
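/*
 * Hand out one plane's DDB: its minimum allocation for the selected wm level
 * plus a share of the leftover blocks proportional to its data rate.
 * Illustrative example: with iter->size = 100 leftover blocks,
 * iter->data_rate = 1000 and a plane data_rate of 250, the plane gets
 * extra = 25 blocks on top of wm->min_ddb_alloc.
 */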
1386 static void
1387 skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
1388 		       struct skl_ddb_entry *ddb,
1389 		       const struct skl_wm_level *wm,
1390 		       u64 data_rate)
1391 {
1392 	u16 size, extra = 0;
1393 
1394 	if (data_rate && iter->data_rate) {
1395 		extra = min_t(u16, iter->size,
1396 			      DIV64_U64_ROUND_UP(iter->size * data_rate,
1397 						 iter->data_rate));
1398 		iter->size -= extra;
1399 		iter->data_rate -= data_rate;
1400 	}
1401 
1402 	/*
1403 	 * Keep ddb entry of all disabled planes explicitly zeroed
1404 	 * to avoid skl_ddb_add_affected_planes() adding them to
1405 	 * the state when other planes change their allocations.
1406 	 */
1407 	size = wm->min_ddb_alloc + extra;
1408 	if (size)
1409 		iter->start = skl_ddb_entry_init(ddb, iter->start,
1410 						 iter->start + size);
1411 }
1412 
1413 static int
1414 skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
1415 			    struct intel_crtc *crtc)
1416 {
1417 	struct intel_crtc_state *crtc_state =
1418 		intel_atomic_get_new_crtc_state(state, crtc);
1419 	const struct intel_dbuf_state *dbuf_state =
1420 		intel_atomic_get_new_dbuf_state(state);
1421 	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
1422 	struct intel_display *display = to_intel_display(state);
1423 	int num_active = hweight8(dbuf_state->active_pipes);
1424 	struct skl_plane_ddb_iter iter;
1425 	enum plane_id plane_id;
1426 	u16 cursor_size;
1427 	u32 blocks;
1428 	int level;
1429 
1430 	/* Clear the partitioning for disabled planes. */
1431 	memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
1432 	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
1433 	memset(crtc_state->wm.skl.plane_min_ddb, 0,
1434 	       sizeof(crtc_state->wm.skl.plane_min_ddb));
1435 	memset(crtc_state->wm.skl.plane_interim_ddb, 0,
1436 	       sizeof(crtc_state->wm.skl.plane_interim_ddb));
1437 
1438 	if (!crtc_state->hw.active)
1439 		return 0;
1440 
1441 	iter.start = alloc->start;
1442 	iter.size = skl_ddb_entry_size(alloc);
1443 	if (iter.size == 0)
1444 		return 0;
1445 
1446 	/* Allocate a fixed number of blocks for the cursor. */
1447 	cursor_size = skl_cursor_allocation(crtc_state, num_active);
1448 	iter.size -= cursor_size;
1449 	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
1450 			   alloc->end - cursor_size, alloc->end);
1451 
1452 	iter.data_rate = skl_total_relative_data_rate(crtc_state);
1453 
1454 	/*
1455 	 * Find the highest watermark level for which we can satisfy the block
1456 	 * requirement of active planes.
1457 	 */
1458 	for (level = display->wm.num_levels - 1; level >= 0; level--) {
1459 		blocks = 0;
1460 		for_each_plane_id_on_crtc(crtc, plane_id) {
1461 			const struct skl_plane_wm *wm =
1462 				&crtc_state->wm.skl.optimal.planes[plane_id];
1463 
1464 			if (plane_id == PLANE_CURSOR) {
1465 				const struct skl_ddb_entry *ddb =
1466 					&crtc_state->wm.skl.plane_ddb[plane_id];
1467 
1468 				if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1469 					drm_WARN_ON(display->drm,
1470 						    wm->wm[level].min_ddb_alloc != U16_MAX);
1471 					blocks = U32_MAX;
1472 					break;
1473 				}
1474 				continue;
1475 			}
1476 
1477 			blocks += wm->wm[level].min_ddb_alloc;
1478 			blocks += wm->uv_wm[level].min_ddb_alloc;
1479 		}
1480 
1481 		if (blocks <= iter.size) {
1482 			iter.size -= blocks;
1483 			break;
1484 		}
1485 	}
1486 
1487 	if (level < 0) {
1488 		drm_dbg_kms(display->drm,
1489 			    "Requested display configuration exceeds system DDB limitations\n");
1490 		drm_dbg_kms(display->drm, "minimum required %d/%d\n",
1491 			    blocks, iter.size);
1492 		return -EINVAL;
1493 	}
1494 
1495 	/* avoid the WARN later when we don't allocate any extra DDB */
1496 	if (iter.data_rate == 0)
1497 		iter.size = 0;
1498 
1499 	/*
1500 	 * Grant each plane the blocks it requires at the highest achievable
1501 	 * watermark level, plus an extra share of the leftover blocks
1502 	 * proportional to its relative data rate.
1503 	 */
1504 	for_each_plane_id_on_crtc(crtc, plane_id) {
1505 		struct skl_ddb_entry *ddb =
1506 			&crtc_state->wm.skl.plane_ddb[plane_id];
1507 		struct skl_ddb_entry *ddb_y =
1508 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
1509 		u16 *min_ddb = &crtc_state->wm.skl.plane_min_ddb[plane_id];
1510 		u16 *interim_ddb =
1511 			&crtc_state->wm.skl.plane_interim_ddb[plane_id];
1512 		const struct skl_plane_wm *wm =
1513 			&crtc_state->wm.skl.optimal.planes[plane_id];
1514 
1515 		if (plane_id == PLANE_CURSOR)
1516 			continue;
1517 
1518 		if (DISPLAY_VER(display) < 11 &&
1519 		    crtc_state->nv12_planes & BIT(plane_id)) {
1520 			skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
1521 					       crtc_state->rel_data_rate_y[plane_id]);
1522 			skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
1523 					       crtc_state->rel_data_rate[plane_id]);
1524 		} else {
1525 			skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
1526 					       crtc_state->rel_data_rate[plane_id]);
1527 		}
1528 
1529 		if (DISPLAY_VER(display) >= 30) {
1530 			*min_ddb = wm->wm[0].min_ddb_alloc;
1531 			*interim_ddb = wm->sagv.wm0.min_ddb_alloc;
1532 		}
1533 	}
1534 	drm_WARN_ON(display->drm, iter.size != 0 || iter.data_rate != 0);
1535 
1536 	/*
1537 	 * When we calculated watermark values we didn't know how high
1538 	 * of a level we'd actually be able to hit, so we just marked
1539 	 * all levels as "enabled."  Go back now and disable the ones
1540 	 * that aren't actually possible.
1541 	 */
1542 	for (level++; level < display->wm.num_levels; level++) {
1543 		for_each_plane_id_on_crtc(crtc, plane_id) {
1544 			const struct skl_ddb_entry *ddb =
1545 				&crtc_state->wm.skl.plane_ddb[plane_id];
1546 			const struct skl_ddb_entry *ddb_y =
1547 				&crtc_state->wm.skl.plane_ddb_y[plane_id];
1548 			struct skl_plane_wm *wm =
1549 				&crtc_state->wm.skl.optimal.planes[plane_id];
1550 
1551 			if (DISPLAY_VER(display) < 11 &&
1552 			    crtc_state->nv12_planes & BIT(plane_id))
1553 				skl_check_nv12_wm_level(&wm->wm[level],
1554 							&wm->uv_wm[level],
1555 							ddb_y, ddb);
1556 			else
1557 				skl_check_wm_level(&wm->wm[level], ddb);
1558 
1559 			if (skl_need_wm_copy_wa(display, level, wm)) {
1560 				wm->wm[level].blocks = wm->wm[level - 1].blocks;
1561 				wm->wm[level].lines = wm->wm[level - 1].lines;
1562 				wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
1563 			}
1564 		}
1565 	}
1566 
1567 	/*
1568 	 * Go back and disable the transition and SAGV watermarks
1569 	 * if it turns out we don't have enough DDB blocks for them.
1570 	 */
1571 	for_each_plane_id_on_crtc(crtc, plane_id) {
1572 		const struct skl_ddb_entry *ddb =
1573 			&crtc_state->wm.skl.plane_ddb[plane_id];
1574 		const struct skl_ddb_entry *ddb_y =
1575 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
1576 		u16 *interim_ddb =
1577 			&crtc_state->wm.skl.plane_interim_ddb[plane_id];
1578 		struct skl_plane_wm *wm =
1579 			&crtc_state->wm.skl.optimal.planes[plane_id];
1580 
1581 		if (DISPLAY_VER(display) < 11 &&
1582 		    crtc_state->nv12_planes & BIT(plane_id)) {
1583 			skl_check_wm_level(&wm->trans_wm, ddb_y);
1584 		} else {
1585 			WARN_ON(skl_ddb_entry_size(ddb_y));
1586 
1587 			skl_check_wm_level(&wm->trans_wm, ddb);
1588 		}
1589 
1590 		skl_check_wm_level(&wm->sagv.wm0, ddb);
1591 		if (DISPLAY_VER(display) >= 30)
1592 			*interim_ddb = wm->sagv.wm0.min_ddb_alloc;
1593 
1594 		skl_check_wm_level(&wm->sagv.trans_wm, ddb);
1595 	}
1596 
1597 	return 0;
1598 }
1599 
1600 /*
1601  * The max latency should be 257 (max the punit can code is 255 and we add 2us
1602  * for the read latency) and cpp should always be <= 8, so that
1603  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
1604  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
1605  */
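/*
 * Method 1 (Bspec): blocks needed to absorb the memory latency at the plane's
 * pixel rate, i.e. latency * pixel_rate * cpp / (1000 * dbuf_block_size)
 * (assuming pixel_rate in kHz and latency in microseconds, as used elsewhere
 * in this file), plus one extra block on display 10+.
 */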
1606 static uint_fixed_16_16_t
1607 skl_wm_method1(struct intel_display *display, u32 pixel_rate,
1608 	       u8 cpp, u32 latency, u32 dbuf_block_size)
1609 {
1610 	u32 wm_intermediate_val;
1611 	uint_fixed_16_16_t ret;
1612 
1613 	if (latency == 0)
1614 		return FP_16_16_MAX;
1615 
1616 	wm_intermediate_val = latency * pixel_rate * cpp;
1617 	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
1618 
1619 	if (DISPLAY_VER(display) >= 10)
1620 		ret = add_fixed16_u32(ret, 1);
1621 
1622 	return ret;
1623 }
1624 
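/*
 * Method 2 (Bspec): lines fetched during the latency period times the blocks
 * needed per line, i.e. ceil(latency * pixel_rate / (htotal * 1000)) *
 * plane_blocks_per_line (same unit assumptions as method 1).
 */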
1625 static uint_fixed_16_16_t
1626 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
1627 	       uint_fixed_16_16_t plane_blocks_per_line)
1628 {
1629 	u32 wm_intermediate_val;
1630 	uint_fixed_16_16_t ret;
1631 
1632 	if (latency == 0)
1633 		return FP_16_16_MAX;
1634 
1635 	wm_intermediate_val = latency * pixel_rate;
1636 	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
1637 					   pipe_htotal * 1000);
1638 	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
1639 	return ret;
1640 }
1641 
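/*
 * Line time in microseconds: htotal / pixel rate, with pixel_rate in kHz
 * (hence the * 1000), rounded up.
 */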
1642 static int skl_wm_linetime_us(const struct intel_crtc_state *crtc_state,
1643 			      int pixel_rate)
1644 {
1645 	return DIV_ROUND_UP(crtc_state->hw.pipe_mode.crtc_htotal * 1000,
1646 			    pixel_rate);
1647 }
1648 
1649 static int
1650 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
1651 		      int width, const struct drm_format_info *format,
1652 		      u64 modifier, unsigned int rotation,
1653 		      u32 plane_pixel_rate, struct skl_wm_params *wp,
1654 		      int color_plane, unsigned int pan_x)
1655 {
1656 	struct intel_display *display = to_intel_display(crtc_state);
1657 	u32 interm_pbpl;
1658 
1659 	/* only planar formats have two planes */
1660 	if (color_plane == 1 &&
1661 	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
1662 		drm_dbg_kms(display->drm,
1663 			    "Non-planar formats have a single plane\n");
1664 		return -EINVAL;
1665 	}
1666 
1667 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
1668 	wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
1669 		intel_fb_is_tiled_modifier(modifier);
1670 	wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
1671 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
1672 
1673 	wp->width = width;
1674 	if (color_plane == 1 && wp->is_planar)
1675 		wp->width /= 2;
1676 
1677 	wp->cpp = format->cpp[color_plane];
1678 	wp->plane_pixel_rate = plane_pixel_rate;
1679 
1680 	if (DISPLAY_VER(display) >= 11 &&
1681 	    modifier == I915_FORMAT_MOD_Yf_TILED  && wp->cpp == 1)
1682 		wp->dbuf_block_size = 256;
1683 	else
1684 		wp->dbuf_block_size = 512;
1685 
1686 	if (drm_rotation_90_or_270(rotation)) {
1687 		switch (wp->cpp) {
1688 		case 1:
1689 			wp->y_min_scanlines = 16;
1690 			break;
1691 		case 2:
1692 			wp->y_min_scanlines = 8;
1693 			break;
1694 		case 4:
1695 			wp->y_min_scanlines = 4;
1696 			break;
1697 		default:
1698 			MISSING_CASE(wp->cpp);
1699 			return -EINVAL;
1700 		}
1701 	} else {
1702 		wp->y_min_scanlines = 4;
1703 	}
1704 
1705 	if (skl_needs_memory_bw_wa(display))
1706 		wp->y_min_scanlines *= 2;
1707 
1708 	wp->plane_bytes_per_line = wp->width * wp->cpp;
1709 	if (wp->y_tiled) {
1710 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
1711 					   wp->y_min_scanlines,
1712 					   wp->dbuf_block_size);
1713 
1714 		if (DISPLAY_VER(display) >= 30)
1715 			interm_pbpl += (pan_x != 0);
1716 		else if (DISPLAY_VER(display) >= 10)
1717 			interm_pbpl++;
1718 
1719 		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
1720 							wp->y_min_scanlines);
1721 	} else {
1722 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
1723 					   wp->dbuf_block_size);
1724 
1725 		if (!wp->x_tiled || DISPLAY_VER(display) >= 10)
1726 			interm_pbpl++;
1727 
1728 		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
1729 	}
1730 
1731 	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
1732 					     wp->plane_blocks_per_line);
1733 
1734 	wp->linetime_us = skl_wm_linetime_us(crtc_state, plane_pixel_rate);
1735 
1736 	return 0;
1737 }
1738 
1739 static int
1740 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
1741 			    const struct intel_plane_state *plane_state,
1742 			    struct skl_wm_params *wp, int color_plane)
1743 {
1744 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1745 	int width;
1746 
1747 	/*
1748 	 * Src coordinates are already rotated by 270 degrees for
1749 	 * the 90/270 degree plane rotation cases (to match the
1750 	 * GTT mapping), hence no need to account for rotation here.
1751 	 */
1752 	width = drm_rect_width(&plane_state->uapi.src) >> 16;
1753 
1754 	return skl_compute_wm_params(crtc_state, width,
1755 				     fb->format, fb->modifier,
1756 				     plane_state->hw.rotation,
1757 				     intel_plane_pixel_rate(crtc_state, plane_state),
1758 				     wp, color_plane,
1759 				     plane_state->uapi.src.x1);
1760 }
1761 
1762 static bool skl_wm_has_lines(struct intel_display *display, int level)
1763 {
1764 	if (DISPLAY_VER(display) >= 10)
1765 		return true;
1766 
1767 	/* The number of lines is ignored for the level 0 watermark. */
1768 	return level > 0;
1769 }
1770 
1771 static int skl_wm_max_lines(struct intel_display *display)
1772 {
1773 	if (DISPLAY_VER(display) >= 13)
1774 		return 255;
1775 	else
1776 		return 31;
1777 }
1778 
1779 static bool xe3_auto_min_alloc_capable(struct intel_plane *plane, int level)
1780 {
1781 	struct intel_display *display = to_intel_display(plane);
1782 
1783 	return DISPLAY_VER(display) >= 30 && level == 0 && plane->id != PLANE_CURSOR;
1784 }
1785 
1786 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
1787 				 struct intel_plane *plane,
1788 				 int level,
1789 				 unsigned int latency,
1790 				 const struct skl_wm_params *wp,
1791 				 const struct skl_wm_level *result_prev,
1792 				 struct skl_wm_level *result /* out */)
1793 {
1794 	struct intel_display *display = to_intel_display(crtc_state);
1795 	uint_fixed_16_16_t method1, method2;
1796 	uint_fixed_16_16_t selected_result;
1797 	u32 blocks, lines, min_ddb_alloc = 0;
1798 
1799 	if (latency == 0 ||
1800 	    (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
1801 		/* reject it */
1802 		result->min_ddb_alloc = U16_MAX;
1803 		return;
1804 	}
1805 
1806 	method1 = skl_wm_method1(display, wp->plane_pixel_rate,
1807 				 wp->cpp, latency, wp->dbuf_block_size);
1808 	method2 = skl_wm_method2(wp->plane_pixel_rate,
1809 				 crtc_state->hw.pipe_mode.crtc_htotal,
1810 				 latency,
1811 				 wp->plane_blocks_per_line);
1812 
1813 	if (wp->y_tiled) {
1814 		selected_result = max_fixed16(method2, wp->y_tile_minimum);
1815 	} else {
1816 		if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
1817 		     wp->dbuf_block_size < 1) &&
1818 		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
1819 			selected_result = method2;
1820 		} else if (latency >= wp->linetime_us) {
1821 			if (DISPLAY_VER(display) == 9)
1822 				selected_result = min_fixed16(method1, method2);
1823 			else
1824 				selected_result = method2;
1825 		} else {
1826 			selected_result = method1;
1827 		}
1828 	}
1829 
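	/*
	 * Result Blocks is the ceiling of Selected Result Blocks, plus 1 on
	 * pre-Xe3 platforms (see also the Selected vs. Result Blocks note in
	 * skl_compute_transition_wm()).
	 */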
1830 	blocks = fixed16_to_u32_round_up(selected_result);
1831 	if (DISPLAY_VER(display) < 30)
1832 		blocks++;
1833 
1834 	/*
1835 	 * Let's have blocks at minimum equivalent to plane_blocks_per_line
1836 	 * as there will be at minimum one line for lines configuration. This
1837 	 * is a workaround for FIFO underruns observed with resolutions like
1838 	 * 4k 60 Hz in single-channel DRAM configurations.
1839 	 *
1840 	 * As per the Bspec 49325, if the ddb allocation can hold at least
1841 	 * one plane_blocks_per_line, we should have selected method2 in
1842 	 * the above logic. Assuming that modern versions have enough dbuf
1843 	 * and method2 guarantees blocks equivalent to at least 1 line,
1844 	 * select the blocks as plane_blocks_per_line.
1845 	 *
1846 	 * TODO: Revisit the logic when we have a better understanding of DRAM
1847 	 * channels' impact on the level 0 memory latency and the relevant
1848 	 * wm calculations.
1849 	 */
1850 	if (skl_wm_has_lines(display, level))
1851 		blocks = max(blocks,
1852 			     fixed16_to_u32_round_up(wp->plane_blocks_per_line));
1853 	lines = div_round_up_fixed16(selected_result,
1854 				     wp->plane_blocks_per_line);
1855 
1856 	if (DISPLAY_VER(display) == 9) {
1857 		/* Display WA #1125: skl,bxt,kbl */
1858 		if (level == 0 && wp->rc_surface)
1859 			blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1860 
1861 		/* Display WA #1126: skl,bxt,kbl */
1862 		if (level >= 1 && level <= 7) {
1863 			if (wp->y_tiled) {
1864 				blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1865 				lines += wp->y_min_scanlines;
1866 			} else {
1867 				blocks++;
1868 			}
1869 		}
1870 	}
1871 
1872 	/*
1873 	 * Make sure result blocks for higher latency levels are
1874 	 * at least as high as the level below the current level.
1875 	 * Assumption in DDB algorithm optimization for special
1876 	 * cases. Also covers Display WA #1125 for RC.
1877 	 *
1878 	 * Let's always do this as the algorithm can give
1879 	 * non-monotonic results on any platform.
1880 	 */
1881 	blocks = max_t(u32, blocks, result_prev->blocks);
1882 	lines = max_t(u32, lines, result_prev->lines);
1883 
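	/*
	 * Minimum DDB allocation: for Y-tiled surfaces round the line count
	 * up to the next y_min_scanlines boundary, add one more y_min_scanlines
	 * granule and convert to blocks; otherwise add a ~10% margin on top of
	 * the block count.
	 */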
1884 	if (DISPLAY_VER(display) >= 11) {
1885 		if (wp->y_tiled) {
1886 			int extra_lines;
1887 
1888 			if (lines % wp->y_min_scanlines == 0)
1889 				extra_lines = wp->y_min_scanlines;
1890 			else
1891 				extra_lines = wp->y_min_scanlines * 2 -
1892 					lines % wp->y_min_scanlines;
1893 
1894 			min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
1895 								 wp->plane_blocks_per_line);
1896 		} else {
1897 			min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
1898 		}
1899 	}
1900 
1901 	if (!skl_wm_has_lines(display, level))
1902 		lines = 0;
1903 
1904 	if (lines > skl_wm_max_lines(display)) {
1905 		/* reject it */
1906 		result->min_ddb_alloc = U16_MAX;
1907 		return;
1908 	}
1909 
1910 	/*
1911 	 * If lines is valid, assume we can use this watermark level
1912 	 * for now.  We'll come back and disable it after we calculate the
1913 	 * DDB allocation if it turns out we don't actually have enough
1914 	 * blocks to satisfy it.
1915 	 */
1916 	result->blocks = blocks;
1917 	result->lines = lines;
1918 	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
1919 	result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
1920 	result->enable = true;
1921 	result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
1922 
1923 	if (DISPLAY_VER(display) < 12 && display->sagv.block_time_us)
1924 		result->can_sagv = latency >= display->sagv.block_time_us;
1925 }
1926 
1927 static void
1928 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
1929 		      struct intel_plane *plane,
1930 		      const struct skl_wm_params *wm_params,
1931 		      struct skl_wm_level *levels)
1932 {
1933 	struct intel_display *display = to_intel_display(crtc_state);
1934 	struct skl_wm_level *result_prev = &levels[0];
1935 	int level;
1936 
1937 	for (level = 0; level < display->wm.num_levels; level++) {
1938 		struct skl_wm_level *result = &levels[level];
1939 		unsigned int latency = skl_wm_latency(display, level, wm_params);
1940 
1941 		skl_compute_plane_wm(crtc_state, plane, level, latency,
1942 				     wm_params, result_prev, result);
1943 
1944 		result_prev = result;
1945 	}
1946 }
1947 
1948 static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
1949 				struct intel_plane *plane,
1950 				const struct skl_wm_params *wm_params,
1951 				struct skl_plane_wm *plane_wm)
1952 {
1953 	struct intel_display *display = to_intel_display(crtc_state);
1954 	struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
1955 	struct skl_wm_level *levels = plane_wm->wm;
1956 	unsigned int latency = 0;
1957 
1958 	if (display->sagv.block_time_us)
1959 		latency = display->sagv.block_time_us +
1960 			skl_wm_latency(display, 0, wm_params);
1961 
1962 	skl_compute_plane_wm(crtc_state, plane, 0, latency,
1963 			     wm_params, &levels[0],
1964 			     sagv_wm);
1965 }
1966 
1967 static void skl_compute_transition_wm(struct intel_display *display,
1968 				      struct skl_wm_level *trans_wm,
1969 				      const struct skl_wm_level *wm0,
1970 				      const struct skl_wm_params *wp)
1971 {
1972 	u16 trans_min, trans_amount, trans_y_tile_min;
1973 	u16 wm0_blocks, trans_offset, blocks;
1974 
1975 	/* Transition WMs don't make any sense if IPC is disabled */
1976 	if (!skl_watermark_ipc_enabled(display))
1977 		return;
1978 
1979 	/*
1980 	 * WaDisableTWM:skl,kbl,cfl,bxt
1981 	 * Transition WMs are not recommended by the HW team for GEN9
1982 	 */
1983 	if (DISPLAY_VER(display) == 9)
1984 		return;
1985 
1986 	if (DISPLAY_VER(display) >= 11)
1987 		trans_min = 4;
1988 	else
1989 		trans_min = 14;
1990 
1991 	/* Display WA #1140: glk,cnl */
1992 	if (DISPLAY_VER(display) == 10)
1993 		trans_amount = 0;
1994 	else
1995 		trans_amount = 10; /* This is a configurable amount */
1996 
1997 	trans_offset = trans_min + trans_amount;
1998 
1999 	/*
2000 	 * The spec asks for Selected Result Blocks for wm0 (the real value),
2001 	 * not Result Blocks (the integer value). Pay attention to the capital
2002 	 * letters. The value wm0->blocks is actually Result Blocks, but
2003 	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
2004 	 * and since we later will have to get the ceiling of the sum in the
2005 	 * transition watermarks calculation, we can just pretend Selected
2006 	 * Result Blocks is Result Blocks minus 1 and it should work for the
2007 	 * current platforms.
2008 	 */
2009 	wm0_blocks = wm0->blocks - 1;
2010 
2011 	if (wp->y_tiled) {
2012 		trans_y_tile_min =
2013 			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
2014 		blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
2015 	} else {
2016 		blocks = wm0_blocks + trans_offset;
2017 	}
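	/* +1 takes the ceiling of the sum (see the Selected vs. Result Blocks note above) */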
2018 	blocks++;
2019 
2020 	/*
2021 	 * Just assume we can enable the transition watermark.  After
2022 	 * computing the DDB we'll come back and disable it if that
2023 	 * assumption turns out to be false.
2024 	 */
2025 	trans_wm->blocks = blocks;
2026 	trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
2027 	trans_wm->enable = true;
2028 }
2029 
2030 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
2031 				     const struct intel_plane_state *plane_state,
2032 				     struct intel_plane *plane, int color_plane)
2033 {
2034 	struct intel_display *display = to_intel_display(crtc_state);
2035 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2036 	struct skl_wm_params wm_params;
2037 	int ret;
2038 
2039 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2040 					  &wm_params, color_plane);
2041 	if (ret)
2042 		return ret;
2043 
2044 	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
2045 
2046 	skl_compute_transition_wm(display, &wm->trans_wm,
2047 				  &wm->wm[0], &wm_params);
2048 
2049 	if (DISPLAY_VER(display) >= 12) {
2050 		tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
2051 
2052 		skl_compute_transition_wm(display, &wm->sagv.trans_wm,
2053 					  &wm->sagv.wm0, &wm_params);
2054 	}
2055 
2056 	return 0;
2057 }
2058 
2059 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
2060 				 const struct intel_plane_state *plane_state,
2061 				 struct intel_plane *plane)
2062 {
2063 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2064 	struct skl_wm_params wm_params;
2065 	int ret;
2066 
2067 	wm->is_planar = true;
2068 
2069 	/* uv plane watermarks must also be validated for NV12/Planar */
2070 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2071 					  &wm_params, 1);
2072 	if (ret)
2073 		return ret;
2074 
2075 	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
2076 
2077 	return 0;
2078 }
2079 
2080 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
2081 			      const struct intel_plane_state *plane_state)
2082 {
2083 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2084 	enum plane_id plane_id = plane->id;
2085 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2086 	const struct drm_framebuffer *fb = plane_state->hw.fb;
2087 	int ret;
2088 
2089 	memset(wm, 0, sizeof(*wm));
2090 
2091 	if (!intel_wm_plane_visible(crtc_state, plane_state))
2092 		return 0;
2093 
2094 	ret = skl_build_plane_wm_single(crtc_state, plane_state,
2095 					plane, 0);
2096 	if (ret)
2097 		return ret;
2098 
2099 	if (fb->format->is_yuv && fb->format->num_planes > 1) {
2100 		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
2101 					    plane);
2102 		if (ret)
2103 			return ret;
2104 	}
2105 
2106 	return 0;
2107 }
2108 
2109 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
2110 			      const struct intel_plane_state *plane_state)
2111 {
2112 	struct intel_display *display = to_intel_display(plane_state);
2113 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2114 	enum plane_id plane_id = plane->id;
2115 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2116 	int ret;
2117 
2118 	/* Watermarks are calculated on the UV plane */
2119 	if (plane_state->is_y_plane)
2120 		return 0;
2121 
2122 	memset(wm, 0, sizeof(*wm));
2123 
2124 	if (plane_state->planar_linked_plane) {
2125 		const struct drm_framebuffer *fb = plane_state->hw.fb;
2126 
2127 		drm_WARN_ON(display->drm,
2128 			    !intel_wm_plane_visible(crtc_state, plane_state));
2129 		drm_WARN_ON(display->drm, !fb->format->is_yuv ||
2130 			    fb->format->num_planes == 1);
2131 
2132 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2133 						plane_state->planar_linked_plane, 0);
2134 		if (ret)
2135 			return ret;
2136 
2137 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2138 						plane, 1);
2139 		if (ret)
2140 			return ret;
2141 	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
2142 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2143 						plane, 0);
2144 		if (ret)
2145 			return ret;
2146 	}
2147 
2148 	return 0;
2149 }
2150 
2151 unsigned int skl_wm0_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
2152 {
2153 	struct intel_display *display = to_intel_display(crtc_state);
2154 	struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->primary);
2155 	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2156 	int ret, pixel_rate, width, level = 0;
2157 	const struct drm_format_info *info;
2158 	struct skl_wm_level wm = {};
2159 	struct skl_wm_params wp;
2160 	unsigned int latency;
2161 	u64 modifier;
2162 	u32 format;
2163 
2164 	/* only expected to be used for VRR guardband calculation */
2165 	drm_WARN_ON(display->drm, !HAS_VRR(display));
2166 
2167 	/* FIXME rather ugly to pick this by hand but maybe no better way? */
2168 	format = DRM_FORMAT_XBGR16161616F;
2169 	if (HAS_4TILE(display))
2170 		modifier = I915_FORMAT_MOD_4_TILED;
2171 	else
2172 		modifier = I915_FORMAT_MOD_Y_TILED;
2173 
2174 	info = drm_get_format_info(display->drm, format, modifier);
2175 
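	/* the max scale factors are 16.16 fixed point, hence the 0x10000 divisors */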
2176 	pixel_rate = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_total_scale(crtc_state),
2177 						  pipe_mode->crtc_clock),
2178 				      0x10000);
2179 
2180 	/* FIXME limit to max plane width? */
2181 	width = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_hscale(crtc_state),
2182 					     pipe_mode->crtc_hdisplay),
2183 				 0x10000);
2184 
2185 	/* FIXME is 90/270 rotation worse than 0/180? */
2186 	ret = skl_compute_wm_params(crtc_state, width, info,
2187 				    modifier, DRM_MODE_ROTATE_0,
2188 				    pixel_rate, &wp, 0, 1);
2189 	drm_WARN_ON(display->drm, ret);
2190 
2191 	latency = skl_wm_latency(display, level, &wp);
2192 
2193 	skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
2194 
2195 	/* FIXME is this sane? */
2196 	if (wm.min_ddb_alloc == U16_MAX)
2197 		wm.lines = skl_wm_max_lines(display);
2198 
2199 	return wm.lines << 16;
2200 }
2201 
2202 static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
2203 {
2204 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2205 	enum plane_id plane_id;
2206 	int wm0_lines = 0;
2207 
2208 	for_each_plane_id_on_crtc(crtc, plane_id) {
2209 		const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
2210 
2211 		/* FIXME what about !skl_wm_has_lines() platforms? */
2212 		wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines);
2213 	}
2214 
2215 	return wm0_lines;
2216 }
2217 
2218 unsigned int skl_wm0_prefill_lines(const struct intel_crtc_state *crtc_state)
2219 {
2220 	return skl_max_wm0_lines(crtc_state) << 16;
2221 }
2222 
2223 /*
2224  * TODO: If we use PKG_C_LATENCY to allow C-states when the delayed vblank
2225  * size is too small for the package C exit latency, we need to notify PSR
2226  * about the scenario so that it can apply Wa_16025596647.
2227  */
2228 static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
2229 				       const struct skl_prefill_ctx *ctx)
2230 {
2231 	struct intel_display *display = to_intel_display(crtc_state);
2232 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2233 	int level;
2234 
2235 	for (level = display->wm.num_levels - 1; level >= 0; level--) {
2236 		int latency;
2237 
2238 		/* FIXME should we care about the latency w/a's? */
2239 		latency = skl_wm_latency(display, level, NULL);
2240 		if (latency == 0)
2241 			continue;
2242 
2243 		/* FIXME is it correct to use 0 latency for wm0 here? */
2244 		if (level == 0)
2245 			latency = 0;
2246 
2247 		if (!skl_prefill_vblank_too_short(ctx, crtc_state, latency))
2248 			return level;
2249 	}
2250 
2251 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] Not enough time in vblank for prefill\n",
2252 		    crtc->base.base.id, crtc->base.name);
2253 
2254 	return -EINVAL;
2255 }
2256 
2257 static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
2258 {
2259 	struct intel_display *display = to_intel_display(crtc_state);
2260 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2261 	struct skl_prefill_ctx ctx;
2262 	int level;
2263 
2264 	if (!crtc_state->hw.active)
2265 		return 0;
2266 
2267 	skl_prefill_init(&ctx, crtc_state);
2268 
2269 	level = skl_max_wm_level_for_vblank(crtc_state, &ctx);
2270 	if (level < 0)
2271 		return level;
2272 
2273 	/*
2274 	 * PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
2275 	 * based on whether we're limited by the vblank duration.
2276 	 */
2277 	crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
2278 
2279 	/*
2280 	 * TODO: assert that we are in fact using the maximum guardband
2281 	 * if we end up disabling any WM levels here. Otherwise we clearly
2282 	 * failed to use a realistic worst case prefill estimate when
2283 	 * determining the guardband size.
2284 	 */
2285 
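	/* Disable all WM levels above the highest one that still fits in the vblank */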
2286 	for (level++; level < display->wm.num_levels; level++) {
2287 		enum plane_id plane_id;
2288 
2289 		for_each_plane_id_on_crtc(crtc, plane_id) {
2290 			struct skl_plane_wm *wm =
2291 				&crtc_state->wm.skl.optimal.planes[plane_id];
2292 
2293 			/*
2294 			 * FIXME just clear enable or flag the entire
2295 			 * thing as bad via min_ddb_alloc=U16_MAX?
2296 			 */
2297 			wm->wm[level].enable = false;
2298 			wm->uv_wm[level].enable = false;
2299 		}
2300 	}
2301 
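	/*
	 * Also disable the SAGV watermarks if the vblank is too short
	 * to cover the SAGV block time.
	 */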
2302 	if (DISPLAY_VER(display) >= 12 &&
2303 	    display->sagv.block_time_us &&
2304 	    skl_prefill_vblank_too_short(&ctx, crtc_state,
2305 					 display->sagv.block_time_us)) {
2306 		enum plane_id plane_id;
2307 
2308 		for_each_plane_id_on_crtc(crtc, plane_id) {
2309 			struct skl_plane_wm *wm =
2310 				&crtc_state->wm.skl.optimal.planes[plane_id];
2311 
2312 			wm->sagv.wm0.enable = false;
2313 			wm->sagv.trans_wm.enable = false;
2314 		}
2315 	}
2316 
2317 	return 0;
2318 }
2319 
2320 static int skl_build_pipe_wm(struct intel_atomic_state *state,
2321 			     struct intel_crtc *crtc)
2322 {
2323 	struct intel_display *display = to_intel_display(crtc);
2324 	struct intel_crtc_state *crtc_state =
2325 		intel_atomic_get_new_crtc_state(state, crtc);
2326 	const struct intel_plane_state *plane_state;
2327 	struct intel_plane *plane;
2328 	int ret, i;
2329 
2330 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2331 		/*
2332 		 * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
2333 		 * instead but we don't populate that correctly for NV12 Y
2334 		 * planes so for now hack this.
2335 		 */
2336 		if (plane->pipe != crtc->pipe)
2337 			continue;
2338 
2339 		if (DISPLAY_VER(display) >= 11)
2340 			ret = icl_build_plane_wm(crtc_state, plane_state);
2341 		else
2342 			ret = skl_build_plane_wm(crtc_state, plane_state);
2343 		if (ret)
2344 			return ret;
2345 	}
2346 
2347 	crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
2348 
2349 	return skl_wm_check_vblank(crtc_state);
2350 }
2351 
2352 static bool skl_wm_level_equals(const struct skl_wm_level *l1,
2353 				const struct skl_wm_level *l2)
2354 {
2355 	return l1->enable == l2->enable &&
2356 		l1->ignore_lines == l2->ignore_lines &&
2357 		l1->lines == l2->lines &&
2358 		l1->blocks == l2->blocks &&
2359 		l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
2360 }
2361 
2362 static bool skl_plane_wm_equals(struct intel_display *display,
2363 				const struct skl_plane_wm *wm1,
2364 				const struct skl_plane_wm *wm2)
2365 {
2366 	int level;
2367 
2368 	for (level = 0; level < display->wm.num_levels; level++) {
2369 		/*
2370 		 * We don't check uv_wm as the hardware doesn't actually
2371 		 * use it. It only gets used for calculating the required
2372 		 * ddb allocation.
2373 		 */
2374 		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
2375 			return false;
2376 	}
2377 
2378 	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
2379 		skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
2380 		skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
2381 }
2382 
2383 static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
2384 				    const struct skl_ddb_entry *b)
2385 {
2386 	return a->start < b->end && b->start < a->end;
2387 }
2388 
2389 static void skl_ddb_entry_union(struct skl_ddb_entry *a,
2390 				const struct skl_ddb_entry *b)
2391 {
2392 	if (a->end && b->end) {
2393 		a->start = min(a->start, b->start);
2394 		a->end = max(a->end, b->end);
2395 	} else if (b->end) {
2396 		a->start = b->start;
2397 		a->end = b->end;
2398 	}
2399 }
2400 
2401 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
2402 				 const struct skl_ddb_entry *entries,
2403 				 int num_entries, int ignore_idx)
2404 {
2405 	int i;
2406 
2407 	for (i = 0; i < num_entries; i++) {
2408 		if (i != ignore_idx &&
2409 		    skl_ddb_entries_overlap(ddb, &entries[i]))
2410 			return true;
2411 	}
2412 
2413 	return false;
2414 }
2415 
2416 static int
2417 skl_ddb_add_affected_planes(struct intel_atomic_state *state,
2418 			    struct intel_crtc *crtc)
2419 {
2420 	struct intel_display *display = to_intel_display(state);
2421 	const struct intel_crtc_state *old_crtc_state =
2422 		intel_atomic_get_old_crtc_state(state, crtc);
2423 	struct intel_crtc_state *new_crtc_state =
2424 		intel_atomic_get_new_crtc_state(state, crtc);
2425 	struct intel_plane *plane;
2426 
2427 	for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
2428 		struct intel_plane_state *plane_state;
2429 		enum plane_id plane_id = plane->id;
2430 
2431 		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
2432 					&new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
2433 		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
2434 					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
2435 			continue;
2436 
2437 		if (new_crtc_state->do_async_flip) {
2438 			drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
2439 				    plane->base.base.id, plane->base.name);
2440 			return -EINVAL;
2441 		}
2442 
2443 		plane_state = intel_atomic_get_plane_state(state, plane);
2444 		if (IS_ERR(plane_state))
2445 			return PTR_ERR(plane_state);
2446 
2447 		new_crtc_state->update_planes |= BIT(plane_id);
2448 		new_crtc_state->async_flip_planes = 0;
2449 		new_crtc_state->do_async_flip = false;
2450 	}
2451 
2452 	return 0;
2453 }
2454 
2455 static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
2456 {
2457 	struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
2458 	u8 enabled_slices;
2459 	enum pipe pipe;
2460 
2461 	/*
2462 	 * FIXME: For now we always enable slice S1 as per
2463 	 * the Bspec display initialization sequence.
2464 	 */
2465 	enabled_slices = BIT(DBUF_S1);
2466 
2467 	for_each_pipe(display, pipe)
2468 		enabled_slices |= dbuf_state->slices[pipe];
2469 
2470 	return enabled_slices;
2471 }
2472 
2473 static int
2474 skl_compute_ddb(struct intel_atomic_state *state)
2475 {
2476 	struct intel_display *display = to_intel_display(state);
2477 	const struct intel_dbuf_state *old_dbuf_state;
2478 	struct intel_dbuf_state *new_dbuf_state = NULL;
2479 	struct intel_crtc_state *new_crtc_state;
2480 	struct intel_crtc *crtc;
2481 	int ret, i;
2482 
2483 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2484 		new_dbuf_state = intel_atomic_get_dbuf_state(state);
2485 		if (IS_ERR(new_dbuf_state))
2486 			return PTR_ERR(new_dbuf_state);
2487 
2488 		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
2489 		break;
2490 	}
2491 
2492 	if (!new_dbuf_state)
2493 		return 0;
2494 
2495 	new_dbuf_state->active_pipes =
2496 		intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
2497 
2498 	if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
2499 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2500 		if (ret)
2501 			return ret;
2502 	}
2503 
2504 	if (HAS_MBUS_JOINING(display)) {
2505 		new_dbuf_state->joined_mbus =
2506 			adlp_check_mbus_joined(new_dbuf_state->active_pipes);
2507 
2508 		if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2509 			ret = intel_cdclk_state_set_joined_mbus(state, new_dbuf_state->joined_mbus);
2510 			if (ret)
2511 				return ret;
2512 		}
2513 	}
2514 
2515 	for_each_intel_crtc(display->drm, crtc) {
2516 		enum pipe pipe = crtc->pipe;
2517 
2518 		new_dbuf_state->slices[pipe] =
2519 			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
2520 						new_dbuf_state->joined_mbus);
2521 
2522 		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
2523 			continue;
2524 
2525 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2526 		if (ret)
2527 			return ret;
2528 	}
2529 
2530 	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
2531 
2532 	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
2533 	    old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2534 		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
2535 		if (ret)
2536 			return ret;
2537 
2538 		drm_dbg_kms(display->drm,
2539 			    "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
2540 			    old_dbuf_state->enabled_slices,
2541 			    new_dbuf_state->enabled_slices,
2542 			    DISPLAY_INFO(display)->dbuf.slice_mask,
2543 			    str_yes_no(old_dbuf_state->joined_mbus),
2544 			    str_yes_no(new_dbuf_state->joined_mbus));
2545 	}
2546 
2547 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2548 		enum pipe pipe = crtc->pipe;
2549 
2550 		new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
2551 
2552 		if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
2553 			continue;
2554 
2555 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2556 		if (ret)
2557 			return ret;
2558 	}
2559 
2560 	for_each_intel_crtc(display->drm, crtc) {
2561 		ret = skl_crtc_allocate_ddb(state, crtc);
2562 		if (ret)
2563 			return ret;
2564 	}
2565 
2566 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2567 		ret = skl_crtc_allocate_plane_ddb(state, crtc);
2568 		if (ret)
2569 			return ret;
2570 
2571 		ret = skl_ddb_add_affected_planes(state, crtc);
2572 		if (ret)
2573 			return ret;
2574 	}
2575 
2576 	return 0;
2577 }
2578 
2579 static char enast(bool enable)
2580 {
2581 	return enable ? '*' : ' ';
2582 }
2583 
2584 static noinline_for_stack void
2585 skl_print_plane_changes(struct intel_display *display,
2586 			struct intel_plane *plane,
2587 			const struct skl_plane_wm *old_wm,
2588 			const struct skl_plane_wm *new_wm)
2589 {
2590 	drm_dbg_kms(display->drm,
2591 		    "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
2592 		    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
2593 		    plane->base.base.id, plane->base.name,
2594 		    enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
2595 		    enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
2596 		    enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
2597 		    enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
2598 		    enast(old_wm->trans_wm.enable),
2599 		    enast(old_wm->sagv.wm0.enable),
2600 		    enast(old_wm->sagv.trans_wm.enable),
2601 		    enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
2602 		    enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
2603 		    enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
2604 		    enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
2605 		    enast(new_wm->trans_wm.enable),
2606 		    enast(new_wm->sagv.wm0.enable),
2607 		    enast(new_wm->sagv.trans_wm.enable));
2608 
2609 	drm_dbg_kms(display->drm,
2610 		    "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
2611 		      " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
2612 		    plane->base.base.id, plane->base.name,
2613 		    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
2614 		    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
2615 		    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
2616 		    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
2617 		    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
2618 		    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
2619 		    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
2620 		    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
2621 		    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
2622 		    enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
2623 		    enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
2624 		    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
2625 		    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
2626 		    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
2627 		    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
2628 		    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
2629 		    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
2630 		    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
2631 		    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
2632 		    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
2633 		    enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
2634 		    enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
2635 
2636 	drm_dbg_kms(display->drm,
2637 		    "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2638 		    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2639 		    plane->base.base.id, plane->base.name,
2640 		    old_wm->wm[0].blocks, old_wm->wm[1].blocks,
2641 		    old_wm->wm[2].blocks, old_wm->wm[3].blocks,
2642 		    old_wm->wm[4].blocks, old_wm->wm[5].blocks,
2643 		    old_wm->wm[6].blocks, old_wm->wm[7].blocks,
2644 		    old_wm->trans_wm.blocks,
2645 		    old_wm->sagv.wm0.blocks,
2646 		    old_wm->sagv.trans_wm.blocks,
2647 		    new_wm->wm[0].blocks, new_wm->wm[1].blocks,
2648 		    new_wm->wm[2].blocks, new_wm->wm[3].blocks,
2649 		    new_wm->wm[4].blocks, new_wm->wm[5].blocks,
2650 		    new_wm->wm[6].blocks, new_wm->wm[7].blocks,
2651 		    new_wm->trans_wm.blocks,
2652 		    new_wm->sagv.wm0.blocks,
2653 		    new_wm->sagv.trans_wm.blocks);
2654 
2655 	drm_dbg_kms(display->drm,
2656 		    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2657 		    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2658 		    plane->base.base.id, plane->base.name,
2659 		    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
2660 		    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
2661 		    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
2662 		    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
2663 		    old_wm->trans_wm.min_ddb_alloc,
2664 		    old_wm->sagv.wm0.min_ddb_alloc,
2665 		    old_wm->sagv.trans_wm.min_ddb_alloc,
2666 		    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
2667 		    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
2668 		    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
2669 		    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
2670 		    new_wm->trans_wm.min_ddb_alloc,
2671 		    new_wm->sagv.wm0.min_ddb_alloc,
2672 		    new_wm->sagv.trans_wm.min_ddb_alloc);
2673 }
2674 
2675 static void
2676 skl_print_wm_changes(struct intel_atomic_state *state)
2677 {
2678 	struct intel_display *display = to_intel_display(state);
2679 	const struct intel_crtc_state *old_crtc_state;
2680 	const struct intel_crtc_state *new_crtc_state;
2681 	struct intel_plane *plane;
2682 	struct intel_crtc *crtc;
2683 	int i;
2684 
2685 	if (!drm_debug_enabled(DRM_UT_KMS))
2686 		return;
2687 
2688 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2689 					    new_crtc_state, i) {
2690 		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
2691 
2692 		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
2693 		new_pipe_wm = &new_crtc_state->wm.skl.optimal;
2694 
2695 		for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
2696 			enum plane_id plane_id = plane->id;
2697 			const struct skl_ddb_entry *old, *new;
2698 
2699 			old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
2700 			new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
2701 
2702 			if (skl_ddb_entry_equal(old, new))
2703 				continue;
2704 			drm_dbg_kms(display->drm,
2705 				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
2706 				    plane->base.base.id, plane->base.name,
2707 				    old->start, old->end, new->start, new->end,
2708 				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
2709 		}
2710 
2711 		for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
2712 			enum plane_id plane_id = plane->id;
2713 			const struct skl_plane_wm *old_wm, *new_wm;
2714 
2715 			old_wm = &old_pipe_wm->planes[plane_id];
2716 			new_wm = &new_pipe_wm->planes[plane_id];
2717 
2718 			if (skl_plane_wm_equals(display, old_wm, new_wm))
2719 				continue;
2720 
2721 			skl_print_plane_changes(display, plane, old_wm, new_wm);
2722 		}
2723 	}
2724 }
2725 
2726 static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
2727 					 const struct skl_pipe_wm *old_pipe_wm,
2728 					 const struct skl_pipe_wm *new_pipe_wm)
2729 {
2730 	struct intel_display *display = to_intel_display(plane);
2731 	int level;
2732 
2733 	for (level = 0; level < display->wm.num_levels; level++) {
2734 		/*
2735 		 * We don't check uv_wm as the hardware doesn't actually
2736 		 * use it. It only gets used for calculating the required
2737 		 * ddb allocation.
2738 		 */
2739 		if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
2740 					 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
2741 			return false;
2742 	}
2743 
2744 	if (HAS_HW_SAGV_WM(display)) {
2745 		const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
2746 		const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
2747 
2748 		if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
2749 		    !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
2750 			return false;
2751 	}
2752 
2753 	return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
2754 				   skl_plane_trans_wm(new_pipe_wm, plane->id));
2755 }
2756 
2757 /*
2758  * To make sure the cursor watermark registers are always consistent
2759  * with our computed state the following scenario needs special
2760  * treatment:
2761  *
2762  * 1. enable cursor
2763  * 2. move cursor entirely offscreen
2764  * 3. disable cursor
2765  *
2766  * Step 2. does call .disable_plane() but does not zero the watermarks
2767  * (since we consider an offscreen cursor still active for the purposes
2768  * of watermarks). Step 3. would not normally call .disable_plane()
2769  * because the actual plane visibility isn't changing, and we don't
2770  * deallocate the cursor ddb until the pipe gets disabled. So we must
2771  * force step 3. to call .disable_plane() to update the watermark
2772  * registers properly.
2773  *
2774 	 * Other planes do not suffer from this issue as their watermarks are
2775  * calculated based on the actual plane visibility. The only time this
2776  * can trigger for the other planes is during the initial readout as the
2777  * default value of the watermarks registers is not zero.
2778  */
2779 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
2780 				      struct intel_crtc *crtc)
2781 {
2782 	struct intel_display *display = to_intel_display(state);
2783 	const struct intel_crtc_state *old_crtc_state =
2784 		intel_atomic_get_old_crtc_state(state, crtc);
2785 	struct intel_crtc_state *new_crtc_state =
2786 		intel_atomic_get_new_crtc_state(state, crtc);
2787 	struct intel_plane *plane;
2788 
2789 	for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
2790 		struct intel_plane_state *plane_state;
2791 		enum plane_id plane_id = plane->id;
2792 
2793 		/*
2794 		 * Force a full wm update for every plane on modeset.
2795 		 * Required because the reset value of the wm registers
2796 		 * is non-zero, whereas we want all disabled planes to
2797 		 * have zero watermarks. So if we turn off the relevant
2798 		 * power well the hardware state will go out of sync
2799 		 * with the software state.
2800 		 */
2801 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
2802 		    skl_plane_selected_wm_equals(plane,
2803 						 &old_crtc_state->wm.skl.optimal,
2804 						 &new_crtc_state->wm.skl.optimal))
2805 			continue;
2806 
2807 		if (new_crtc_state->do_async_flip) {
2808 			drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
2809 				    plane->base.base.id, plane->base.name);
2810 			return -EINVAL;
2811 		}
2812 
2813 		plane_state = intel_atomic_get_plane_state(state, plane);
2814 		if (IS_ERR(plane_state))
2815 			return PTR_ERR(plane_state);
2816 
2817 		new_crtc_state->update_planes |= BIT(plane_id);
2818 		new_crtc_state->async_flip_planes = 0;
2819 		new_crtc_state->do_async_flip = false;
2820 	}
2821 
2822 	return 0;
2823 }
2824 
2825 static int pkgc_max_linetime(struct intel_atomic_state *state)
2826 {
2827 	struct intel_display *display = to_intel_display(state);
2828 	const struct intel_crtc_state *crtc_state;
2829 	struct intel_crtc *crtc;
2830 	int i, max_linetime;
2831 
2832 	/*
2833 	 * Apparently the hardware uses WM_LINETIME internally for
2834 	 * this, so compute everything based on that.
2835 	 */
2836 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
2837 		display->pkgc.disable[crtc->pipe] = crtc_state->vrr.enable;
2838 		display->pkgc.linetime[crtc->pipe] = DIV_ROUND_UP(crtc_state->linetime, 8);
2839 	}
2840 
2841 	max_linetime = 0;
2842 	for_each_intel_crtc(display->drm, crtc) {
2843 		if (display->pkgc.disable[crtc->pipe])
2844 			return 0;
2845 
2846 		max_linetime = max(display->pkgc.linetime[crtc->pipe], max_linetime);
2847 	}
2848 
2849 	return max_linetime;
2850 }
2851 
2852 void
2853 intel_program_dpkgc_latency(struct intel_atomic_state *state)
2854 {
2855 	struct intel_display *display = to_intel_display(state);
2856 	int max_linetime, latency, added_wake_time = 0;
2857 
2858 	if (DISPLAY_VER(display) < 20)
2859 		return;
2860 
2861 	mutex_lock(&display->wm.wm_mutex);
2862 
2863 	latency = skl_watermark_max_latency(display, 1);
2864 
2865 	/* FIXME runtime changes to enable_flipq are racy */
2866 	if (display->params.enable_flipq)
2867 		added_wake_time = intel_flipq_exec_time_us(display);
2868 
2869 	/*
2870 	 * Wa_22020432604
2871 	 * "PKG_C_LATENCY Added Wake Time field is not working"
2872 	 */
2873 	if (latency && IS_DISPLAY_VER(display, 20, 30)) {
2874 		latency += added_wake_time;
2875 		added_wake_time = 0;
2876 	}
2877 
2878 	max_linetime = pkgc_max_linetime(state);
2879 
2880 	if (max_linetime == 0 || latency == 0) {
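		/* program the latency field to its maximum (all ones) value */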
2881 		latency = REG_FIELD_GET(LNL_PKG_C_LATENCY_MASK,
2882 					LNL_PKG_C_LATENCY_MASK);
2883 		added_wake_time = 0;
2884 	} else {
2885 		/*
2886 		 * Wa_22020299601
2887 		 * "Increase the latency programmed in PKG_C_LATENCY Pkg C Latency to be a
2888 		 *  multiple of the pipeline time from WM_LINETIME"
2889 		 */
2890 		latency = roundup(latency, max_linetime);
2891 	}
2892 
2893 	intel_de_write(display, LNL_PKG_C_LATENCY,
2894 		       REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time) |
2895 		       REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency));
2896 
2897 	mutex_unlock(&display->wm.wm_mutex);
2898 }
2899 
2900 static int
2901 skl_compute_wm(struct intel_atomic_state *state)
2902 {
2903 	struct intel_display *display = to_intel_display(state);
2904 	struct intel_crtc *crtc;
2905 	struct intel_crtc_state __maybe_unused *new_crtc_state;
2906 	int ret, i;
2907 
2908 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2909 		ret = skl_build_pipe_wm(state, crtc);
2910 		if (ret)
2911 			return ret;
2912 	}
2913 
2914 	ret = skl_compute_ddb(state);
2915 	if (ret)
2916 		return ret;
2917 
2918 	/*
2919 	 * skl_compute_ddb() will have adjusted the final watermarks
2920 	 * based on how much ddb is available. Now we can actually
2921 	 * check if the final watermarks changed.
2922 	 */
2923 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2924 		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
2925 
2926 		/*
2927 		 * We store use_sagv_wm in the crtc state rather than relying on
2928 		 * that bw state since we have no convenient way to get at the
2929 		 * latter from the plane commit hooks (especially in the legacy
2930 		 * cursor case).
2931 		 *
2932 		 * drm_atomic_check_only() gets upset if we pull more crtcs
2933 		 * into the state, so we have to calculate this based on the
2934 		 * individual intel_crtc_can_enable_sagv() rather than
2935 		 * the overall intel_bw_can_enable_sagv(). Otherwise the
2936 		 * crtcs not included in the commit would not switch to the
2937 		 * SAGV watermarks when we are about to enable SAGV, and that
2938 		 * would lead to underruns. This does mean extra power draw
2939 		 * when only a subset of the crtcs are blocking SAGV as the
2940 		 * other crtcs can't be allowed to use the more optimal
2941 		 * normal (ie. non-SAGV) watermarks.
2942 		 */
2943 		pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
2944 			DISPLAY_VER(display) >= 12 &&
2945 			intel_crtc_can_enable_sagv(new_crtc_state);
2946 
2947 		ret = skl_wm_add_affected_planes(state, crtc);
2948 		if (ret)
2949 			return ret;
2950 	}
2951 
2952 	skl_print_wm_changes(state);
2953 
2954 	return 0;
2955 }
2956 
2957 static void skl_wm_level_from_reg_val(struct intel_display *display,
2958 				      u32 val, struct skl_wm_level *level)
2959 {
2960 	level->enable = val & PLANE_WM_EN;
2961 	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
2962 	level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
2963 	level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
2964 	level->auto_min_alloc_wm_enable = DISPLAY_VER(display) >= 30 ?
2965 					   val & PLANE_WM_AUTO_MIN_ALLOC_EN : 0;
2966 }
2967 
2968 static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
2969 				     struct skl_pipe_wm *out)
2970 {
2971 	struct intel_display *display = to_intel_display(crtc);
2972 	enum pipe pipe = crtc->pipe;
2973 	enum plane_id plane_id;
2974 	int level;
2975 	u32 val;
2976 
2977 	for_each_plane_id_on_crtc(crtc, plane_id) {
2978 		struct skl_plane_wm *wm = &out->planes[plane_id];
2979 
2980 		for (level = 0; level < display->wm.num_levels; level++) {
2981 			if (plane_id != PLANE_CURSOR)
2982 				val = intel_de_read(display, PLANE_WM(pipe, plane_id, level));
2983 			else
2984 				val = intel_de_read(display, CUR_WM(pipe, level));
2985 
2986 			skl_wm_level_from_reg_val(display, val, &wm->wm[level]);
2987 		}
2988 
2989 		if (plane_id != PLANE_CURSOR)
2990 			val = intel_de_read(display, PLANE_WM_TRANS(pipe, plane_id));
2991 		else
2992 			val = intel_de_read(display, CUR_WM_TRANS(pipe));
2993 
2994 		skl_wm_level_from_reg_val(display, val, &wm->trans_wm);
2995 
2996 		if (HAS_HW_SAGV_WM(display)) {
2997 			if (plane_id != PLANE_CURSOR)
2998 				val = intel_de_read(display, PLANE_WM_SAGV(pipe, plane_id));
2999 			else
3000 				val = intel_de_read(display, CUR_WM_SAGV(pipe));
3001 
3002 			skl_wm_level_from_reg_val(display, val, &wm->sagv.wm0);
3003 
3004 			if (plane_id != PLANE_CURSOR)
3005 				val = intel_de_read(display, PLANE_WM_SAGV_TRANS(pipe, plane_id));
3006 			else
3007 				val = intel_de_read(display, CUR_WM_SAGV_TRANS(pipe));
3008 
3009 			skl_wm_level_from_reg_val(display, val, &wm->sagv.trans_wm);
3010 		} else if (DISPLAY_VER(display) >= 12) {
3011 			wm->sagv.wm0 = wm->wm[0];
3012 			wm->sagv.trans_wm = wm->trans_wm;
3013 		}
3014 	}
3015 }
3016 
3017 static void skl_wm_get_hw_state(struct intel_display *display)
3018 {
3019 	struct intel_dbuf_state *dbuf_state =
3020 		to_intel_dbuf_state(display->dbuf.obj.state);
3021 	struct intel_crtc *crtc;
3022 
3023 	if (HAS_MBUS_JOINING(display))
3024 		dbuf_state->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN;
3025 
3026 	dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(display, &display->cdclk.hw);
3027 	dbuf_state->active_pipes = 0;
3028 
3029 	for_each_intel_crtc(display->drm, crtc) {
3030 		struct intel_crtc_state *crtc_state =
3031 			to_intel_crtc_state(crtc->base.state);
3032 		enum pipe pipe = crtc->pipe;
3033 		unsigned int mbus_offset;
3034 		enum plane_id plane_id;
3035 		u8 slices;
3036 
3037 		memset(&crtc_state->wm.skl.optimal, 0,
3038 		       sizeof(crtc_state->wm.skl.optimal));
3039 		if (crtc_state->hw.active) {
3040 			skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
3041 			dbuf_state->active_pipes |= BIT(pipe);
3042 		}
3043 		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
3044 
3045 		memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
3046 
3047 		for_each_plane_id_on_crtc(crtc, plane_id) {
3048 			struct skl_ddb_entry *ddb =
3049 				&crtc_state->wm.skl.plane_ddb[plane_id];
3050 			struct skl_ddb_entry *ddb_y =
3051 				&crtc_state->wm.skl.plane_ddb_y[plane_id];
3052 			u16 *min_ddb =
3053 				&crtc_state->wm.skl.plane_min_ddb[plane_id];
3054 			u16 *interim_ddb =
3055 				&crtc_state->wm.skl.plane_interim_ddb[plane_id];
3056 
3057 			if (!crtc_state->hw.active)
3058 				continue;
3059 
3060 			skl_ddb_get_hw_plane_state(display, crtc->pipe,
3061 						   plane_id, ddb, ddb_y,
3062 						   min_ddb, interim_ddb);
3063 
3064 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
3065 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
3066 		}
3067 
3068 		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
3069 
3070 		/*
3071 		 * Used for checking overlaps, so we need absolute
3072 		 * offsets instead of MBUS relative offsets.
3073 		 */
3074 		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
3075 						 dbuf_state->joined_mbus);
3076 		mbus_offset = mbus_ddb_offset(display, slices);
3077 		crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
3078 		crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
3079 
3080 		/* The slices actually used by the planes on the pipe */
3081 		dbuf_state->slices[pipe] =
3082 			skl_ddb_dbuf_slice_mask(display, &crtc_state->wm.skl.ddb);
3083 
3084 		drm_dbg_kms(display->drm,
3085 			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
3086 			    crtc->base.base.id, crtc->base.name,
3087 			    dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
3088 			    dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
3089 			    str_yes_no(dbuf_state->joined_mbus));
3090 	}
3091 
3092 	dbuf_state->enabled_slices = display->dbuf.enabled_slices;
3093 }
3094 
3095 bool skl_watermark_ipc_enabled(struct intel_display *display)
3096 {
3097 	return display->wm.ipc_enabled;
3098 }
3099 
3100 void skl_watermark_ipc_update(struct intel_display *display)
3101 {
3102 	if (!HAS_IPC(display))
3103 		return;
3104 
3105 	intel_de_rmw(display, DISP_ARB_CTL2, DISP_IPC_ENABLE,
3106 		     skl_watermark_ipc_enabled(display) ? DISP_IPC_ENABLE : 0);
3107 }
3108 
3109 static bool skl_watermark_ipc_can_enable(struct intel_display *display)
3110 {
3111 	/* Display WA #0477 WaDisableIPC: skl */
3112 	if (display->platform.skylake)
3113 		return false;
3114 
3115 	/* Display WA #1141: SKL:all KBL:all CFL */
3116 	if (display->platform.kabylake ||
3117 	    display->platform.coffeelake ||
3118 	    display->platform.cometlake) {
3119 		const struct dram_info *dram_info = intel_dram_info(display->drm);
3120 
3121 		return dram_info->symmetric_memory;
3122 	}
3123 
3124 	return true;
3125 }
3126 
3127 void skl_watermark_ipc_init(struct intel_display *display)
3128 {
3129 	if (!HAS_IPC(display))
3130 		return;
3131 
3132 	display->wm.ipc_enabled = skl_watermark_ipc_can_enable(display);
3133 
3134 	skl_watermark_ipc_update(display);
3135 }
3136 
3137 static void multiply_wm_latency(struct intel_display *display, int mult)
3138 {
3139 	u16 *wm = display->wm.skl_latency;
3140 	int level, num_levels = display->wm.num_levels;
3141 
3142 	for (level = 0; level < num_levels; level++)
3143 		wm[level] *= mult;
3144 }
3145 
3146 static void increase_wm_latency(struct intel_display *display, int inc)
3147 {
3148 	u16 *wm = display->wm.skl_latency;
3149 	int level, num_levels = display->wm.num_levels;
3150 
3151 	wm[0] += inc;
3152 
3153 	for (level = 1; level < num_levels; level++) {
3154 		if (wm[level] == 0)
3155 			break;
3156 
3157 		wm[level] += inc;
3158 	}
3159 }
3160 
3161 static bool need_16gb_dimm_wa(struct intel_display *display)
3162 {
3163 	const struct dram_info *dram_info = intel_dram_info(display->drm);
3164 
3165 	return (display->platform.skylake || display->platform.kabylake ||
3166 		display->platform.coffeelake || display->platform.cometlake ||
3167 		DISPLAY_VER(display) == 11) && dram_info->has_16gb_dimms;
3168 }
3169 
3170 static int wm_read_latency(struct intel_display *display)
3171 {
3172 	if (DISPLAY_VER(display) >= 14)
3173 		return 6;
3174 	else if (DISPLAY_VER(display) >= 12)
3175 		return 3;
3176 	else
3177 		return 2;
3178 }
3179 
3180 static void sanitize_wm_latency(struct intel_display *display)
3181 {
3182 	u16 *wm = display->wm.skl_latency;
3183 	int level, num_levels = display->wm.num_levels;
3184 
3185 	/*
3186 	 * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
3187 	 * need to be disabled. We make sure to sanitize the values out
3188 	 * of the punit to satisfy this requirement.
3189 	 */
3190 	for (level = 1; level < num_levels; level++) {
3191 		if (wm[level] == 0)
3192 			break;
3193 	}
3194 
3195 	for (level = level + 1; level < num_levels; level++)
3196 		wm[level] = 0;
3197 }
3198 
3199 static void make_wm_latency_monotonic(struct intel_display *display)
3200 {
3201 	u16 *wm = display->wm.skl_latency;
3202 	int level, num_levels = display->wm.num_levels;
3203 
3204 	for (level = 1; level < num_levels; level++) {
3205 		if (wm[level] == 0)
3206 			break;
3207 
3208 		wm[level] = max(wm[level], wm[level-1]);
3209 	}
3210 }
3211 
3212 static void
3213 adjust_wm_latency(struct intel_display *display)
3214 {
3215 	u16 *wm = display->wm.skl_latency;
3216 
3217 	if (display->platform.dg2)
3218 		multiply_wm_latency(display, 2);
3219 
3220 	sanitize_wm_latency(display);
3221 
3222 	make_wm_latency_monotonic(display);
3223 
3224 	/*
3225 	 * WaWmMemoryReadLatency
3226 	 *
3227 	 * punit doesn't take into account the read latency so we need
3228 	 * The punit doesn't take the read latency into account, so we need
3229 	 * to add a proper adjustment to each valid level we retrieve
3230 	 * from the punit when the level 0 response data is 0us.
3231 	if (wm[0] == 0)
3232 		increase_wm_latency(display, wm_read_latency(display));
3233 
3234 	/*
3235 	 * WA Level-0 adjustment for 16Gb+ DIMMs: SKL+
3236 	 * If we could not get the DIMM info, enable this WA to prevent any
3237 	 * underruns, i.e. assume 16Gb+ DIMMs whenever the DIMM info is not
3238 	 * available.
3239 	 */
3240 	if (need_16gb_dimm_wa(display))
3241 		increase_wm_latency(display, 1);
3242 }
3243 
3244 static void mtl_read_wm_latency(struct intel_display *display)
3245 {
3246 	u16 *wm = display->wm.skl_latency;
3247 	u32 val;
3248 
3249 	val = intel_de_read(display, MTL_LATENCY_LP0_LP1);
3250 	wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3251 	wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3252 
3253 	val = intel_de_read(display, MTL_LATENCY_LP2_LP3);
3254 	wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3255 	wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3256 
3257 	val = intel_de_read(display, MTL_LATENCY_LP4_LP5);
3258 	wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3259 	wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3260 }
3261 
3262 static void skl_read_wm_latency(struct intel_display *display)
3263 {
3264 	u16 *wm = display->wm.skl_latency;
3265 	u32 val;
3266 	int ret;
3267 
3268 	/* read the first set of memory latencies[0:3] */
3269 	val = 0; /* data0 to be programmed to 0 for first set */
3270 	ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3271 	if (ret) {
3272 		drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
3273 		return;
3274 	}
3275 
3276 	wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val);
3277 	wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val);
3278 	wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val);
3279 	wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val);
3280 
3281 	/* read the second set of memory latencies[4:7] */
3282 	val = 1; /* data0 to be programmed to 1 for second set */
3283 	ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3284 	if (ret) {
3285 		drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
3286 		return;
3287 	}
3288 
3289 	wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val);
3290 	wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val);
3291 	wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val);
3292 	wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val);
3293 }
3294 
3295 static void skl_setup_wm_latency(struct intel_display *display)
3296 {
3297 	if (HAS_HW_SAGV_WM(display))
3298 		display->wm.num_levels = 6;
3299 	else
3300 		display->wm.num_levels = 8;
3301 
3302 	if (DISPLAY_VER(display) >= 14)
3303 		mtl_read_wm_latency(display);
3304 	else
3305 		skl_read_wm_latency(display);
3306 
3307 	intel_print_wm_latency(display, "original", display->wm.skl_latency);
3308 
3309 	adjust_wm_latency(display);
3310 
3311 	intel_print_wm_latency(display, "adjusted", display->wm.skl_latency);
3312 }
3313 
3314 static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
3315 {
3316 	struct intel_dbuf_state *dbuf_state;
3317 
3318 	dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
3319 	if (!dbuf_state)
3320 		return NULL;
3321 
3322 	return &dbuf_state->base;
3323 }
3324 
3325 static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
3326 				     struct intel_global_state *state)
3327 {
3328 	kfree(state);
3329 }
3330 
3331 static const struct intel_global_state_funcs intel_dbuf_funcs = {
3332 	.atomic_duplicate_state = intel_dbuf_duplicate_state,
3333 	.atomic_destroy_state = intel_dbuf_destroy_state,
3334 };
3335 
3336 struct intel_dbuf_state *
3337 intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
3338 {
3339 	struct intel_display *display = to_intel_display(state);
3340 	struct intel_global_state *dbuf_state;
3341 
3342 	dbuf_state = intel_atomic_get_global_obj_state(state, &display->dbuf.obj);
3343 	if (IS_ERR(dbuf_state))
3344 		return ERR_CAST(dbuf_state);
3345 
3346 	return to_intel_dbuf_state(dbuf_state);
3347 }
3348 
3349 int intel_dbuf_init(struct intel_display *display)
3350 {
3351 	struct intel_dbuf_state *dbuf_state;
3352 
3353 	dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
3354 	if (!dbuf_state)
3355 		return -ENOMEM;
3356 
3357 	intel_atomic_global_obj_init(display, &display->dbuf.obj,
3358 				     &dbuf_state->base, &intel_dbuf_funcs);
3359 
3360 	return 0;
3361 }
3362 
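/*
 * Pipes A/D share one DBUF bank and pipes B/C the other; check whether
 * the given pipe is the only active pipe on its bank.
 */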
3363 static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
3364 {
3365 	switch (pipe) {
3366 	case PIPE_A:
3367 	case PIPE_D:
3368 		active_pipes &= BIT(PIPE_A) | BIT(PIPE_D);
3369 		break;
3370 	case PIPE_B:
3371 	case PIPE_C:
3372 		active_pipes &= BIT(PIPE_B) | BIT(PIPE_C);
3373 		break;
3374 	default: /* to suppress compiler warning */
3375 		MISSING_CASE(pipe);
3376 		return false;
3377 	}
3378 
3379 	return is_power_of_2(active_pipes);
3380 }
3381 
3382 static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
3383 			      const struct intel_dbuf_state *dbuf_state)
3384 {
3385 	struct intel_display *display = to_intel_display(crtc);
3386 	u32 val = 0;
3387 
3388 	if (DISPLAY_VER(display) >= 14)
3389 		val |= MBUS_DBOX_I_CREDIT(2);
3390 
3391 	if (DISPLAY_VER(display) >= 12) {
3392 		val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
3393 		val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
3394 		val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
3395 	}
3396 
3397 	if (DISPLAY_VER(display) >= 14)
3398 		val |= dbuf_state->joined_mbus ?
3399 			MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
3400 	else if (display->platform.alderlake_p)
3401 		/* Wa_22010947358:adl-p */
3402 		val |= dbuf_state->joined_mbus ?
3403 			MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
3404 	else
3405 		val |= MBUS_DBOX_A_CREDIT(2);
3406 
3407 	if (DISPLAY_VER(display) >= 14) {
3408 		val |= MBUS_DBOX_B_CREDIT(0xA);
3409 	} else if (display->platform.alderlake_p) {
3410 		val |= MBUS_DBOX_BW_CREDIT(2);
3411 		val |= MBUS_DBOX_B_CREDIT(8);
3412 	} else if (DISPLAY_VER(display) >= 12) {
3413 		val |= MBUS_DBOX_BW_CREDIT(2);
3414 		val |= MBUS_DBOX_B_CREDIT(12);
3415 	} else {
3416 		val |= MBUS_DBOX_BW_CREDIT(1);
3417 		val |= MBUS_DBOX_B_CREDIT(8);
3418 	}
3419 
3420 	if (DISPLAY_VERx100(display) == 1400) {
3421 		if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
3422 			val |= MBUS_DBOX_BW_8CREDITS_MTL;
3423 		else
3424 			val |= MBUS_DBOX_BW_4CREDITS_MTL;
3425 	}
3426 
3427 	return val;
3428 }
3429 
3430 static void pipe_mbus_dbox_ctl_update(struct intel_display *display,
3431 				      const struct intel_dbuf_state *dbuf_state)
3432 {
3433 	struct intel_crtc *crtc;
3434 
3435 	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, dbuf_state->active_pipes)
3436 		intel_de_write(display, PIPE_MBUS_DBOX_CTL(crtc->pipe),
3437 			       pipe_mbus_dbox_ctl(crtc, dbuf_state));
3438 }
3439 
3440 static void intel_mbus_dbox_update(struct intel_atomic_state *state)
3441 {
3442 	struct intel_display *display = to_intel_display(state);
3443 	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
3444 
3445 	if (DISPLAY_VER(display) < 11)
3446 		return;
3447 
3448 	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
3449 	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
3450 	if (!new_dbuf_state ||
3451 	    (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
3452 	     new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
3453 		return;
3454 
3455 	pipe_mbus_dbox_ctl_update(display, new_dbuf_state);
3456 }
3457 
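/*
 * Record the new mdclk/cdclk ratio in the dbuf global state and lock the
 * global state so the change is serialized against other commits. The
 * hardware itself is reprogrammed later via
 * intel_dbuf_mdclk_cdclk_ratio_update().
 */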
3458 int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
3459 					   int ratio)
3460 {
3461 	struct intel_dbuf_state *dbuf_state;
3462 
3463 	dbuf_state = intel_atomic_get_dbuf_state(state);
3464 	if (IS_ERR(dbuf_state))
3465 		return PTR_ERR(dbuf_state);
3466 
3467 	dbuf_state->mdclk_cdclk_ratio = ratio;
3468 
3469 	return intel_atomic_lock_global_state(&dbuf_state->base);
3470 }
3471 
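/*
 * Program the mdclk/cdclk ratio into the hardware: the MBUS translation
 * throttle min (display version 20+) uses the raw ratio, while the
 * per-slice DBUF min tracker state service uses a ratio that is doubled
 * when MBUS joining is active.
 */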
3472 void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
3473 					 int ratio, bool joined_mbus)
3474 {
3475 	enum dbuf_slice slice;
3476 
3477 	if (!HAS_MBUS_JOINING(display))
3478 		return;
3479 
3480 	if (DISPLAY_VER(display) >= 20)
3481 		intel_de_rmw(display, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
3482 			     MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
3483 
3484 	if (joined_mbus)
3485 		ratio *= 2;
3486 
3487 	drm_dbg_kms(display->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
3488 		    ratio, str_yes_no(joined_mbus));
3489 
3490 	for_each_dbuf_slice(display, slice)
3491 		intel_de_rmw(display, DBUF_CTL_S(slice),
3492 			     DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
3493 			     DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
3494 }
3495 
3496 static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
3497 {
3498 	struct intel_display *display = to_intel_display(state);
3499 	const struct intel_dbuf_state *old_dbuf_state =
3500 		intel_atomic_get_old_dbuf_state(state);
3501 	const struct intel_dbuf_state *new_dbuf_state =
3502 		intel_atomic_get_new_dbuf_state(state);
3503 	int mdclk_cdclk_ratio;
3504 
3505 	if (intel_cdclk_is_decreasing_later(state)) {
3506 		/* cdclk/mdclk will be changed later by intel_set_cdclk_post_plane_update() */
3507 		mdclk_cdclk_ratio = old_dbuf_state->mdclk_cdclk_ratio;
3508 	} else {
3509 		/* cdclk/mdclk already changed by intel_set_cdclk_pre_plane_update() */
3510 		mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
3511 	}
3512 
3513 	intel_dbuf_mdclk_cdclk_ratio_update(display, mdclk_cdclk_ratio,
3514 					    new_dbuf_state->joined_mbus);
3515 }
3516 
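/*
 * With MBUS joined there is exactly one active pipe; return it, provided
 * its CRTC is part of this commit and does not need a full modeset.
 * Otherwise return INVALID_PIPE so the caller programs
 * MBUS_JOIN_PIPE_SELECT_NONE instead.
 */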
3517 static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
3518 					const struct intel_dbuf_state *dbuf_state)
3519 {
3520 	struct intel_display *display = to_intel_display(state);
3521 	enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
3522 	const struct intel_crtc_state *new_crtc_state;
3523 	struct intel_crtc *crtc;
3524 
3525 	drm_WARN_ON(display->drm, !dbuf_state->joined_mbus);
3526 	drm_WARN_ON(display->drm, !is_power_of_2(dbuf_state->active_pipes));
3527 
3528 	crtc = intel_crtc_for_pipe(display, pipe);
3529 	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
3530 
3531 	if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state))
3532 		return pipe;
3533 	else
3534 		return INVALID_PIPE;
3535 }
3536 
3537 static void mbus_ctl_join_update(struct intel_display *display,
3538 				 const struct intel_dbuf_state *dbuf_state,
3539 				 enum pipe pipe)
3540 {
3541 	u32 mbus_ctl;
3542 
3543 	if (dbuf_state->joined_mbus)
3544 		mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
3545 	else
3546 		mbus_ctl = MBUS_HASHING_MODE_2x2;
3547 
3548 	if (pipe != INVALID_PIPE)
3549 		mbus_ctl |= MBUS_JOIN_PIPE_SELECT(pipe);
3550 	else
3551 		mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
3552 
3553 	intel_de_rmw(display, MBUS_CTL,
3554 		     MBUS_HASHING_MODE_MASK | MBUS_JOIN |
3555 		     MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
3556 }
3557 
3558 static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
3559 					enum pipe pipe)
3560 {
3561 	struct intel_display *display = to_intel_display(state);
3562 	const struct intel_dbuf_state *old_dbuf_state =
3563 		intel_atomic_get_old_dbuf_state(state);
3564 	const struct intel_dbuf_state *new_dbuf_state =
3565 		intel_atomic_get_new_dbuf_state(state);
3566 
3567 	drm_dbg_kms(display->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
3568 		    str_yes_no(old_dbuf_state->joined_mbus),
3569 		    str_yes_no(new_dbuf_state->joined_mbus),
3570 		    pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
3571 
3572 	mbus_ctl_join_update(display, new_dbuf_state, pipe);
3573 }
3574 
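/*
 * When MBUS joining is being enabled, reprogram MBUS_CTL, the DBOX
 * credits and the min tracker before the new DDB allocations are
 * written, presumably so the hardware is already in joined mode by the
 * time the single remaining pipe starts using the full DBUF. Disabling
 * is handled after the DDB update in intel_dbuf_mbus_post_ddb_update().
 */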
3575 void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
3576 {
3577 	const struct intel_dbuf_state *new_dbuf_state =
3578 		intel_atomic_get_new_dbuf_state(state);
3579 	const struct intel_dbuf_state *old_dbuf_state =
3580 		intel_atomic_get_old_dbuf_state(state);
3581 
3582 	if (!new_dbuf_state)
3583 		return;
3584 
3585 	if (!old_dbuf_state->joined_mbus && new_dbuf_state->joined_mbus) {
3586 		enum pipe pipe = intel_mbus_joined_pipe(state, new_dbuf_state);
3587 
3588 		WARN_ON(!new_dbuf_state->base.changed);
3589 
3590 		intel_dbuf_mbus_join_update(state, pipe);
3591 		intel_mbus_dbox_update(state);
3592 		intel_dbuf_mdclk_min_tracker_update(state);
3593 	}
3594 }
3595 
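/*
 * Counterpart of intel_dbuf_mbus_pre_ddb_update(): MBUS joining is
 * disabled only after the DDB has been reallocated, followed by a vblank
 * wait on the previously joined pipe. The min tracker and DBOX credits
 * are also resynced here when only the set of active pipes changed.
 */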
3596 void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
3597 {
3598 	struct intel_display *display = to_intel_display(state);
3599 	const struct intel_dbuf_state *new_dbuf_state =
3600 		intel_atomic_get_new_dbuf_state(state);
3601 	const struct intel_dbuf_state *old_dbuf_state =
3602 		intel_atomic_get_old_dbuf_state(state);
3603 
3604 	if (!new_dbuf_state)
3605 		return;
3606 
3607 	if (old_dbuf_state->joined_mbus && !new_dbuf_state->joined_mbus) {
3608 		enum pipe pipe = intel_mbus_joined_pipe(state, old_dbuf_state);
3609 
3610 		WARN_ON(!new_dbuf_state->base.changed);
3611 
3612 		intel_dbuf_mdclk_min_tracker_update(state);
3613 		intel_mbus_dbox_update(state);
3614 		intel_dbuf_mbus_join_update(state, pipe);
3615 
3616 		if (pipe != INVALID_PIPE) {
3617 			struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
3618 
3619 			intel_crtc_wait_for_next_vblank(crtc);
3620 		}
3621 	} else if (old_dbuf_state->joined_mbus == new_dbuf_state->joined_mbus &&
3622 		   old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
3623 		WARN_ON(!new_dbuf_state->base.changed);
3624 
3625 		intel_dbuf_mdclk_min_tracker_update(state);
3626 		intel_mbus_dbox_update(state);
3627 	}
3629 }
3630 
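/*
 * Enable the union of the old and new DBUF slices before the planes are
 * updated, so that both the old and the new DDB allocations stay backed
 * by powered-up slices during the transition. Slices that are no longer
 * needed are shut down afterwards in intel_dbuf_post_plane_update().
 */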
3631 void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
3632 {
3633 	struct intel_display *display = to_intel_display(state);
3634 	const struct intel_dbuf_state *new_dbuf_state =
3635 		intel_atomic_get_new_dbuf_state(state);
3636 	const struct intel_dbuf_state *old_dbuf_state =
3637 		intel_atomic_get_old_dbuf_state(state);
3638 	u8 old_slices, new_slices;
3639 
3640 	if (!new_dbuf_state)
3641 		return;
3642 
3643 	old_slices = old_dbuf_state->enabled_slices;
3644 	new_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
3645 
3646 	if (old_slices == new_slices)
3647 		return;
3648 
3649 	WARN_ON(!new_dbuf_state->base.changed);
3650 
3651 	gen9_dbuf_slices_update(display, new_slices);
3652 }
3653 
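/*
 * Counterpart of intel_dbuf_pre_plane_update(): now that the planes use
 * their new DDB allocations, trim the enabled DBUF slices down to the
 * new set.
 */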
3654 void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
3655 {
3656 	struct intel_display *display = to_intel_display(state);
3657 	const struct intel_dbuf_state *new_dbuf_state =
3658 		intel_atomic_get_new_dbuf_state(state);
3659 	const struct intel_dbuf_state *old_dbuf_state =
3660 		intel_atomic_get_old_dbuf_state(state);
3661 	u8 old_slices, new_slices;
3662 
3663 	if (!new_dbuf_state)
3664 		return;
3665 
3666 	old_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
3667 	new_slices = new_dbuf_state->enabled_slices;
3668 
3669 	if (old_slices == new_slices)
3670 		return;
3671 
3672 	WARN_ON(!new_dbuf_state->base.changed);
3673 
3674 	gen9_dbuf_slices_update(display, new_slices);
3675 }
3676 
3677 int intel_dbuf_num_enabled_slices(const struct intel_dbuf_state *dbuf_state)
3678 {
3679 	return hweight8(dbuf_state->enabled_slices);
3680 }
3681 
3682 int intel_dbuf_num_active_pipes(const struct intel_dbuf_state *dbuf_state)
3683 {
3684 	return hweight8(dbuf_state->active_pipes);
3685 }
3686 
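/*
 * Report whether the dbuf changes in this commit (active pipes, or
 * enabled slices on display version < 30) require a pmdemand update.
 */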
3687 bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state)
3688 {
3689 	struct intel_display *display = to_intel_display(state);
3690 	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
3691 
3692 	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
3693 	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
3694 
3695 	if (new_dbuf_state &&
3696 	    new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
3697 		return true;
3698 
3699 	if (DISPLAY_VER(display) < 30) {
3700 		if (new_dbuf_state &&
3701 		    new_dbuf_state->enabled_slices !=
3702 		    old_dbuf_state->enabled_slices)
3703 			return true;
3704 	}
3705 
3706 	return false;
3707 }
3708 
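/*
 * Sanitize the MBUS state inherited from the BIOS: if MBUS joining was
 * left enabled even though the active pipes no longer qualify for it,
 * un-join and reprogram the dependent min tracker and DBOX settings.
 */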
3709 static void skl_mbus_sanitize(struct intel_display *display)
3710 {
3711 	struct intel_dbuf_state *dbuf_state =
3712 		to_intel_dbuf_state(display->dbuf.obj.state);
3713 
3714 	if (!HAS_MBUS_JOINING(display))
3715 		return;
3716 
3717 	if (!dbuf_state->joined_mbus ||
3718 	    adlp_check_mbus_joined(dbuf_state->active_pipes))
3719 		return;
3720 
3721 	drm_dbg_kms(display->drm, "Disabling redundant MBUS joining (active pipes 0x%x)\n",
3722 		    dbuf_state->active_pipes);
3723 
3724 	dbuf_state->joined_mbus = false;
3725 	intel_dbuf_mdclk_cdclk_ratio_update(display,
3726 					    dbuf_state->mdclk_cdclk_ratio,
3727 					    dbuf_state->joined_mbus);
3728 	pipe_mbus_dbox_ctl_update(display, dbuf_state);
3729 	mbus_ctl_join_update(display, dbuf_state, INVALID_PIPE);
3730 }
3731 
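/*
 * Check whether the DBUF state inherited from the BIOS is usable: each
 * pipe may only use the DBUF slices assigned to it, and the per-pipe DDB
 * allocations must not overlap.
 */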
3732 static bool skl_dbuf_is_misconfigured(struct intel_display *display)
3733 {
3734 	const struct intel_dbuf_state *dbuf_state =
3735 		to_intel_dbuf_state(display->dbuf.obj.state);
3736 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
3737 	struct intel_crtc *crtc;
3738 
3739 	for_each_intel_crtc(display->drm, crtc) {
3740 		const struct intel_crtc_state *crtc_state =
3741 			to_intel_crtc_state(crtc->base.state);
3742 
3743 		entries[crtc->pipe] = crtc_state->wm.skl.ddb;
3744 	}
3745 
3746 	for_each_intel_crtc(display->drm, crtc) {
3747 		const struct intel_crtc_state *crtc_state =
3748 			to_intel_crtc_state(crtc->base.state);
3749 		u8 slices;
3750 
3751 		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
3752 						 dbuf_state->joined_mbus);
3753 		if (dbuf_state->slices[crtc->pipe] & ~slices)
3754 			return true;
3755 
3756 		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
3757 						I915_MAX_PIPES, crtc->pipe))
3758 			return true;
3759 	}
3760 
3761 	return false;
3762 }
3763 
3764 static void skl_dbuf_sanitize(struct intel_display *display)
3765 {
3766 	struct intel_crtc *crtc;
3767 
3768 	/*
3769 	 * On TGL/RKL (at least) the BIOS likes to assign the planes
3770 	 * to the wrong DBUF slices. This will cause an infinite loop
3771 	 * in skl_commit_modeset_enables() as it can't find a way to
3772 	 * transition from the old bogus DBUF layout to the new
3773 	 * proper DBUF layout without DBUF allocation overlaps between
3774 	 * the planes (which cannot be allowed or else the hardware
3775 	 * may hang). If we detect a bogus DBUF layout just turn off
3776 	 * all the planes so that skl_commit_modeset_enables() can
3777 	 * simply ignore them.
3778 	 */
3779 	if (!skl_dbuf_is_misconfigured(display))
3780 		return;
3781 
3782 	drm_dbg_kms(display->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
3783 
3784 	for_each_intel_crtc(display->drm, crtc) {
3785 		struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3786 		const struct intel_plane_state *plane_state =
3787 			to_intel_plane_state(plane->base.state);
3788 		struct intel_crtc_state *crtc_state =
3789 			to_intel_crtc_state(crtc->base.state);
3790 
3791 		if (plane_state->uapi.visible)
3792 			intel_plane_disable_noatomic(crtc, plane);
3793 
3794 		drm_WARN_ON(display->drm, crtc_state->active_planes != 0);
3795 
3796 		memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
3797 	}
3798 }
3799 
3800 static void skl_wm_sanitize(struct intel_display *display)
3801 {
3802 	skl_mbus_sanitize(display);
3803 	skl_dbuf_sanitize(display);
3804 }
3805 
3806 void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc)
3807 {
3808 	struct intel_display *display = to_intel_display(crtc);
3809 	struct intel_crtc_state *crtc_state =
3810 		to_intel_crtc_state(crtc->base.state);
3811 	struct intel_dbuf_state *dbuf_state =
3812 		to_intel_dbuf_state(display->dbuf.obj.state);
3813 	enum pipe pipe = crtc->pipe;
3814 
3815 	if (DISPLAY_VER(display) < 9)
3816 		return;
3817 
3818 	dbuf_state->active_pipes &= ~BIT(pipe);
3819 
3820 	dbuf_state->weight[pipe] = 0;
3821 	dbuf_state->slices[pipe] = 0;
3822 
3823 	memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
3824 
3825 	memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
3826 }
3827 
3828 void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
3829 				   struct intel_plane *plane)
3830 {
3831 	struct intel_display *display = to_intel_display(crtc);
3832 	struct intel_crtc_state *crtc_state =
3833 		to_intel_crtc_state(crtc->base.state);
3834 
3835 	if (DISPLAY_VER(display) < 9)
3836 		return;
3837 
3838 	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[plane->id], 0, 0);
3839 	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[plane->id], 0, 0);
3840 
3841 	crtc_state->wm.skl.plane_min_ddb[plane->id] = 0;
3842 	crtc_state->wm.skl.plane_interim_ddb[plane->id] = 0;
3843 
3844 	memset(&crtc_state->wm.skl.raw.planes[plane->id], 0,
3845 	       sizeof(crtc_state->wm.skl.raw.planes[plane->id]));
3846 	memset(&crtc_state->wm.skl.optimal.planes[plane->id], 0,
3847 	       sizeof(crtc_state->wm.skl.optimal.planes[plane->id]));
3848 }
3849 
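/*
 * Cross-check the computed software watermark/DDB state for @crtc
 * against the values read back from the hardware and log any mismatches.
 */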
3850 void intel_wm_state_verify(struct intel_atomic_state *state,
3851 			   struct intel_crtc *crtc)
3852 {
3853 	struct intel_display *display = to_intel_display(state);
3854 	const struct intel_crtc_state *new_crtc_state =
3855 		intel_atomic_get_new_crtc_state(state, crtc);
3856 	struct skl_hw_state {
3857 		struct skl_ddb_entry ddb[I915_MAX_PLANES];
3858 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
3859 		u16 min_ddb[I915_MAX_PLANES];
3860 		u16 interim_ddb[I915_MAX_PLANES];
3861 		struct skl_pipe_wm wm;
3862 	} *hw;
3863 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
3864 	struct intel_plane *plane;
3865 	u8 hw_enabled_slices;
3866 	int level;
3867 
3868 	if (DISPLAY_VER(display) < 9 || !new_crtc_state->hw.active)
3869 		return;
3870 
3871 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3872 	if (!hw)
3873 		return;
3874 
3875 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
3876 
3877 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y, hw->min_ddb, hw->interim_ddb);
3878 
3879 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(display);
3880 
3881 	if (DISPLAY_VER(display) >= 11 &&
3882 	    hw_enabled_slices != display->dbuf.enabled_slices)
3883 		drm_err(display->drm,
3884 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
3885 			display->dbuf.enabled_slices,
3886 			hw_enabled_slices);
3887 
3888 	for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
3889 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
3890 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
3891 
3892 		/* Watermarks */
3893 		for (level = 0; level < display->wm.num_levels; level++) {
3894 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
3895 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
3896 
3897 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
3898 				continue;
3899 
3900 			drm_err(display->drm,
3901 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3902 				plane->base.base.id, plane->base.name, level,
3903 				sw_wm_level->enable,
3904 				sw_wm_level->blocks,
3905 				sw_wm_level->lines,
3906 				hw_wm_level->enable,
3907 				hw_wm_level->blocks,
3908 				hw_wm_level->lines);
3909 		}
3910 
3911 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
3912 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
3913 
3914 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3915 			drm_err(display->drm,
3916 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3917 				plane->base.base.id, plane->base.name,
3918 				sw_wm_level->enable,
3919 				sw_wm_level->blocks,
3920 				sw_wm_level->lines,
3921 				hw_wm_level->enable,
3922 				hw_wm_level->blocks,
3923 				hw_wm_level->lines);
3924 		}
3925 
3926 		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
3927 		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
3928 
3929 		if (HAS_HW_SAGV_WM(display) &&
3930 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3931 			drm_err(display->drm,
3932 				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3933 				plane->base.base.id, plane->base.name,
3934 				sw_wm_level->enable,
3935 				sw_wm_level->blocks,
3936 				sw_wm_level->lines,
3937 				hw_wm_level->enable,
3938 				hw_wm_level->blocks,
3939 				hw_wm_level->lines);
3940 		}
3941 
3942 		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
3943 		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
3944 
3945 		if (HAS_HW_SAGV_WM(display) &&
3946 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3947 			drm_err(display->drm,
3948 				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3949 				plane->base.base.id, plane->base.name,
3950 				sw_wm_level->enable,
3951 				sw_wm_level->blocks,
3952 				sw_wm_level->lines,
3953 				hw_wm_level->enable,
3954 				hw_wm_level->blocks,
3955 				hw_wm_level->lines);
3956 		}
3957 
3958 		/* DDB */
3959 		hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
3960 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
3961 
3962 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
3963 			drm_err(display->drm,
3964 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
3965 				plane->base.base.id, plane->base.name,
3966 				sw_ddb_entry->start, sw_ddb_entry->end,
3967 				hw_ddb_entry->start, hw_ddb_entry->end);
3968 		}
3969 	}
3970 
3971 	kfree(hw);
3972 }
3973 
3974 static const struct intel_wm_funcs skl_wm_funcs = {
3975 	.compute_global_watermarks = skl_compute_wm,
3976 	.get_hw_state = skl_wm_get_hw_state,
3977 	.sanitize = skl_wm_sanitize,
3978 };
3979 
3980 void skl_wm_init(struct intel_display *display)
3981 {
3982 	intel_sagv_init(display);
3983 
3984 	skl_setup_wm_latency(display);
3985 
3986 	display->funcs.wm = &skl_wm_funcs;
3987 }
3988 
3989 static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
3990 {
3991 	struct intel_display *display = m->private;
3992 
3993 	seq_printf(m, "Isochronous Priority Control: %s\n",
3994 		   str_yes_no(skl_watermark_ipc_enabled(display)));
3995 	return 0;
3996 }
3997 
3998 static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
3999 {
4000 	struct intel_display *display = inode->i_private;
4001 
4002 	return single_open(file, skl_watermark_ipc_status_show, display);
4003 }
4004 
4005 static ssize_t skl_watermark_ipc_status_write(struct file *file,
4006 					      const char __user *ubuf,
4007 					      size_t len, loff_t *offp)
4008 {
4009 	struct seq_file *m = file->private_data;
4010 	struct intel_display *display = m->private;
4011 	bool enable;
4012 	int ret;
4013 
4014 	ret = kstrtobool_from_user(ubuf, len, &enable);
4015 	if (ret < 0)
4016 		return ret;
4017 
4018 	with_intel_display_rpm(display) {
4019 		if (!skl_watermark_ipc_enabled(display) && enable)
4020 			drm_info(display->drm,
4021 				 "Enabling IPC: WM will be proper only after next commit\n");
4022 		display->wm.ipc_enabled = enable;
4023 		skl_watermark_ipc_update(display);
4024 	}
4025 
4026 	return len;
4027 }
4028 
4029 static const struct file_operations skl_watermark_ipc_status_fops = {
4030 	.owner = THIS_MODULE,
4031 	.open = skl_watermark_ipc_status_open,
4032 	.read = seq_read,
4033 	.llseek = seq_lseek,
4034 	.release = single_release,
4035 	.write = skl_watermark_ipc_status_write
4036 };
4037 
4038 static int intel_sagv_status_show(struct seq_file *m, void *unused)
4039 {
4040 	struct intel_display *display = m->private;
4041 	static const char * const sagv_status[] = {
4042 		[I915_SAGV_UNKNOWN] = "unknown",
4043 		[I915_SAGV_DISABLED] = "disabled",
4044 		[I915_SAGV_ENABLED] = "enabled",
4045 		[I915_SAGV_NOT_CONTROLLED] = "not controlled",
4046 	};
4047 
4048 	seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(display)));
4049 	seq_printf(m, "SAGV modparam: %s\n",
4050 		   str_enabled_disabled(display->params.enable_sagv));
4051 	seq_printf(m, "SAGV status: %s\n", sagv_status[display->sagv.status]);
4052 	seq_printf(m, "SAGV block time: %d usec\n", display->sagv.block_time_us);
4053 
4054 	return 0;
4055 }
4056 
4057 DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
4058 
4059 void skl_watermark_debugfs_register(struct intel_display *display)
4060 {
4061 	struct dentry *debugfs_root = display->drm->debugfs_root;
4062 
4063 	if (HAS_IPC(display))
4064 		debugfs_create_file("i915_ipc_status", 0644, debugfs_root,
4065 				    display, &skl_watermark_ipc_status_fops);
4066 
4067 	if (HAS_SAGV(display))
4068 		debugfs_create_file("i915_sagv_status", 0444, debugfs_root,
4069 				    display, &intel_sagv_status_fops);
4070 }
4071 
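/*
 * Return the latency of the deepest watermark level, at or above
 * @initial_wm_level, that has a non-zero latency, or 0 if no such level
 * exists.
 */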
4072 unsigned int skl_watermark_max_latency(struct intel_display *display, int initial_wm_level)
4073 {
4074 	int level;
4075 
4076 	for (level = display->wm.num_levels - 1; level >= initial_wm_level; level--) {
4077 		unsigned int latency = skl_wm_latency(display, level, NULL);
4078 
4079 		if (latency)
4080 			return latency;
4081 	}
4082 
4083 	return 0;
4084 }
4085