xref: /linux/drivers/gpu/drm/i915/display/skl_watermark.c (revision 71dfa617ea9f18e4585fe78364217cd32b1fc382)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <drm/drm_blend.h>
7 
8 #include "i915_drv.h"
9 #include "i915_reg.h"
10 #include "i9xx_wm.h"
11 #include "intel_atomic.h"
12 #include "intel_atomic_plane.h"
13 #include "intel_bw.h"
14 #include "intel_cdclk.h"
15 #include "intel_crtc.h"
16 #include "intel_de.h"
17 #include "intel_display.h"
18 #include "intel_display_power.h"
19 #include "intel_display_types.h"
20 #include "intel_fb.h"
21 #include "intel_fixed.h"
22 #include "intel_pcode.h"
23 #include "intel_wm.h"
24 #include "skl_watermark.h"
25 #include "skl_watermark_regs.h"
26 
27 /* It is expected that DSB can do posted writes to every register in
28  * the pipe and planes within 100us. For flip queue use case, the
29  * recommended DSB execution time is 100us + one SAGV block time.
30  */
31 #define DSB_EXE_TIME 100
32 
33 static void skl_sagv_disable(struct drm_i915_private *i915);
34 
35 /* Stores plane specific WM parameters */
36 struct skl_wm_params {
37 	bool x_tiled, y_tiled;
38 	bool rc_surface;
39 	bool is_planar;
40 	u32 width;
41 	u8 cpp;
42 	u32 plane_pixel_rate;
43 	u32 y_min_scanlines;
44 	u32 plane_bytes_per_line;
45 	uint_fixed_16_16_t plane_blocks_per_line;
46 	uint_fixed_16_16_t y_tile_minimum;
47 	u32 linetime_us;
48 	u32 dbuf_block_size;
49 };
50 
51 u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
52 {
53 	u8 enabled_slices = 0;
54 	enum dbuf_slice slice;
55 
56 	for_each_dbuf_slice(i915, slice) {
57 		if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
58 			enabled_slices |= BIT(slice);
59 	}
60 
61 	return enabled_slices;
62 }
63 
64 /*
65  * FIXME: We still don't have the proper code to detect if we need to apply the WA,
66  * so assume we'll always need it in order to avoid underruns.
67  */
68 static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
69 {
70 	return DISPLAY_VER(i915) == 9;
71 }
72 
73 static bool
74 intel_has_sagv(struct drm_i915_private *i915)
75 {
76 	return HAS_SAGV(i915) &&
77 		i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
78 }
79 
80 static u32
81 intel_sagv_block_time(struct drm_i915_private *i915)
82 {
83 	if (DISPLAY_VER(i915) >= 14) {
84 		u32 val;
85 
86 		val = intel_de_read(i915, MTL_LATENCY_SAGV);
87 
88 		return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
89 	} else if (DISPLAY_VER(i915) >= 12) {
90 		u32 val = 0;
91 		int ret;
92 
93 		ret = snb_pcode_read(&i915->uncore,
94 				     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
95 				     &val, NULL);
96 		if (ret) {
97 			drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
98 			return 0;
99 		}
100 
101 		return val;
102 	} else if (DISPLAY_VER(i915) == 11) {
103 		return 10;
104 	} else if (HAS_SAGV(i915)) {
105 		return 30;
106 	} else {
107 		return 0;
108 	}
109 }
110 
111 static void intel_sagv_init(struct drm_i915_private *i915)
112 {
113 	if (!HAS_SAGV(i915))
114 		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
115 
116 	/*
117 	 * Probe to see if we have working SAGV control.
118 	 * For icl+ this was already determined by intel_bw_init_hw().
119 	 */
120 	if (DISPLAY_VER(i915) < 11)
121 		skl_sagv_disable(i915);
122 
123 	drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
124 
125 	i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
126 
127 	drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
128 		    str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
129 
130 	/* avoid overflow when adding with wm0 latency/etc. */
131 	if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
132 		     "Excessive SAGV block time %u, ignoring\n",
133 		     i915->display.sagv.block_time_us))
134 		i915->display.sagv.block_time_us = 0;
135 
136 	if (!intel_has_sagv(i915))
137 		i915->display.sagv.block_time_us = 0;
138 }
139 
140 /*
141  * SAGV dynamically adjusts the system agent voltage and clock frequencies
142  * depending on power and performance requirements. The display engine access
143  * to system memory is blocked during the adjustment time. Because of the
144  * blocking time, having this enabled can cause full system hangs and/or pipe
145  * underruns if we don't meet all of the following requirements:
146  *
147  *  - <= 1 pipe enabled
148  *  - All planes can enable watermarks for latencies >= SAGV engine block time
149  *  - We're not using an interlaced display configuration
150  */
151 static void skl_sagv_enable(struct drm_i915_private *i915)
152 {
153 	int ret;
154 
155 	if (!intel_has_sagv(i915))
156 		return;
157 
158 	if (i915->display.sagv.status == I915_SAGV_ENABLED)
159 		return;
160 
161 	drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
162 	ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
163 			      GEN9_SAGV_ENABLE);
164 
165 	/* We don't need to wait for SAGV when enabling */
166 
167 	/*
168 	 * Some skl systems, pre-release machines in particular,
169 	 * don't actually have SAGV.
170 	 */
171 	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
172 		drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
173 		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
174 		return;
175 	} else if (ret < 0) {
176 		drm_err(&i915->drm, "Failed to enable SAGV\n");
177 		return;
178 	}
179 
180 	i915->display.sagv.status = I915_SAGV_ENABLED;
181 }
182 
183 static void skl_sagv_disable(struct drm_i915_private *i915)
184 {
185 	int ret;
186 
187 	if (!intel_has_sagv(i915))
188 		return;
189 
190 	if (i915->display.sagv.status == I915_SAGV_DISABLED)
191 		return;
192 
193 	drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
194 	/* bspec says to keep retrying for at least 1 ms */
195 	ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
196 				GEN9_SAGV_DISABLE,
197 				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
198 				1);
199 	/*
200 	 * Some skl systems, pre-release machines in particular,
201 	 * don't actually have SAGV.
202 	 */
203 	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
204 		drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
205 		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
206 		return;
207 	} else if (ret < 0) {
208 		drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
209 		return;
210 	}
211 
212 	i915->display.sagv.status = I915_SAGV_DISABLED;
213 }
214 
215 static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
216 {
217 	struct drm_i915_private *i915 = to_i915(state->base.dev);
218 	const struct intel_bw_state *new_bw_state =
219 		intel_atomic_get_new_bw_state(state);
220 
221 	if (!new_bw_state)
222 		return;
223 
224 	if (!intel_can_enable_sagv(i915, new_bw_state))
225 		skl_sagv_disable(i915);
226 }
227 
228 static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
229 {
230 	struct drm_i915_private *i915 = to_i915(state->base.dev);
231 	const struct intel_bw_state *new_bw_state =
232 		intel_atomic_get_new_bw_state(state);
233 
234 	if (!new_bw_state)
235 		return;
236 
237 	if (intel_can_enable_sagv(i915, new_bw_state))
238 		skl_sagv_enable(i915);
239 }
240 
241 static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
242 {
243 	struct drm_i915_private *i915 = to_i915(state->base.dev);
244 	const struct intel_bw_state *old_bw_state =
245 		intel_atomic_get_old_bw_state(state);
246 	const struct intel_bw_state *new_bw_state =
247 		intel_atomic_get_new_bw_state(state);
248 	u16 old_mask, new_mask;
249 
250 	if (!new_bw_state)
251 		return;
252 
253 	old_mask = old_bw_state->qgv_points_mask;
254 	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
255 
256 	if (old_mask == new_mask)
257 		return;
258 
259 	WARN_ON(!new_bw_state->base.changed);
260 
261 	drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
262 		    old_mask, new_mask);
263 
264 	/*
265 	 * Restrict required qgv points before updating the configuration.
266 	 * According to BSpec we can't mask and unmask qgv points at the same
267 	 * time. Also masking should be done before updating the configuration
268 	 * and unmasking afterwards.
269 	 */
270 	icl_pcode_restrict_qgv_points(i915, new_mask);
271 }
272 
273 static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
274 {
275 	struct drm_i915_private *i915 = to_i915(state->base.dev);
276 	const struct intel_bw_state *old_bw_state =
277 		intel_atomic_get_old_bw_state(state);
278 	const struct intel_bw_state *new_bw_state =
279 		intel_atomic_get_new_bw_state(state);
280 	u16 old_mask, new_mask;
281 
282 	if (!new_bw_state)
283 		return;
284 
285 	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
286 	new_mask = new_bw_state->qgv_points_mask;
287 
288 	if (old_mask == new_mask)
289 		return;
290 
291 	WARN_ON(!new_bw_state->base.changed);
292 
293 	drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
294 		    old_mask, new_mask);
295 
296 	/*
297 	 * Allow required qgv points after updating the configuration.
298 	 * According to BSpec we can't mask and unmask qgv points at the same
299 	 * time. Also masking should be done before updating the configuration
300 	 * and unmasking afterwards.
301 	 */
302 	icl_pcode_restrict_qgv_points(i915, new_mask);
303 }
304 
305 void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
306 {
307 	struct drm_i915_private *i915 = to_i915(state->base.dev);
308 
309 	/*
310 	 * Just return if we can't control SAGV or don't have it.
311 	 * This is different from the situation when we have SAGV but just can't
312 	 * afford it due to DBuf limitations - if SAGV is completely
313 	 * disabled in the BIOS, we are not even allowed to send a PCode request,
314 	 * as it would throw an error. So we have to check it here.
315 	 */
316 	if (!intel_has_sagv(i915))
317 		return;
318 
319 	if (DISPLAY_VER(i915) >= 11)
320 		icl_sagv_pre_plane_update(state);
321 	else
322 		skl_sagv_pre_plane_update(state);
323 }
324 
325 void intel_sagv_post_plane_update(struct intel_atomic_state *state)
326 {
327 	struct drm_i915_private *i915 = to_i915(state->base.dev);
328 
329 	/*
330 	 * Just return if we can't control SAGV or don't have it.
331 	 * This is different from the situation when we have SAGV but just can't
332 	 * afford it due to DBuf limitations - if SAGV is completely
333 	 * disabled in the BIOS, we are not even allowed to send a PCode request,
334 	 * as it would throw an error. So we have to check it here.
335 	 */
336 	if (!intel_has_sagv(i915))
337 		return;
338 
339 	if (DISPLAY_VER(i915) >= 11)
340 		icl_sagv_post_plane_update(state);
341 	else
342 		skl_sagv_post_plane_update(state);
343 }
344 
345 static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
346 {
347 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
348 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
349 	enum plane_id plane_id;
350 	int max_level = INT_MAX;
351 
352 	if (!intel_has_sagv(i915))
353 		return false;
354 
355 	if (!crtc_state->hw.active)
356 		return true;
357 
358 	if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
359 		return false;
360 
361 	for_each_plane_id_on_crtc(crtc, plane_id) {
362 		const struct skl_plane_wm *wm =
363 			&crtc_state->wm.skl.optimal.planes[plane_id];
364 		int level;
365 
366 		/* Skip this plane if it's not enabled */
367 		if (!wm->wm[0].enable)
368 			continue;
369 
370 		/* Find the highest enabled wm level for this plane */
371 		for (level = i915->display.wm.num_levels - 1;
372 		     !wm->wm[level].enable; --level)
373 		     { }
374 
375 		/* Highest common enabled wm level for all planes */
376 		max_level = min(level, max_level);
377 	}
378 
379 	/* No enabled planes? */
380 	if (max_level == INT_MAX)
381 		return true;
382 
383 	for_each_plane_id_on_crtc(crtc, plane_id) {
384 		const struct skl_plane_wm *wm =
385 			&crtc_state->wm.skl.optimal.planes[plane_id];
386 
387 		/*
388 		 * All enabled planes must have enabled a common wm level that
389 		 * can tolerate memory latencies higher than sagv_block_time_us
390 		 */
391 		if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
392 			return false;
393 	}
394 
395 	return true;
396 }
397 
398 static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
399 {
400 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
401 	enum plane_id plane_id;
402 
403 	if (!crtc_state->hw.active)
404 		return true;
405 
406 	for_each_plane_id_on_crtc(crtc, plane_id) {
407 		const struct skl_plane_wm *wm =
408 			&crtc_state->wm.skl.optimal.planes[plane_id];
409 
410 		if (wm->wm[0].enable && !wm->sagv.wm0.enable)
411 			return false;
412 	}
413 
414 	return true;
415 }
416 
417 static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
418 {
419 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
420 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
421 
422 	if (!i915->display.params.enable_sagv)
423 		return false;
424 
425 	if (DISPLAY_VER(i915) >= 12)
426 		return tgl_crtc_can_enable_sagv(crtc_state);
427 	else
428 		return skl_crtc_can_enable_sagv(crtc_state);
429 }
430 
431 bool intel_can_enable_sagv(struct drm_i915_private *i915,
432 			   const struct intel_bw_state *bw_state)
433 {
434 	if (DISPLAY_VER(i915) < 11 &&
435 	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
436 		return false;
437 
438 	return bw_state->pipe_sagv_reject == 0;
439 }
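/*
 * Note for intel_can_enable_sagv(): on pre-icl hardware the
 * is_power_of_2() check above effectively enforces the "<= 1 pipe
 * enabled" SAGV requirement described before skl_sagv_enable() - an
 * active_pipes mask with more than one bit set is not a power of two,
 * so SAGV is rejected.
 */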
440 
441 static int intel_compute_sagv_mask(struct intel_atomic_state *state)
442 {
443 	struct drm_i915_private *i915 = to_i915(state->base.dev);
444 	int ret;
445 	struct intel_crtc *crtc;
446 	struct intel_crtc_state *new_crtc_state;
447 	struct intel_bw_state *new_bw_state = NULL;
448 	const struct intel_bw_state *old_bw_state = NULL;
449 	int i;
450 
451 	for_each_new_intel_crtc_in_state(state, crtc,
452 					 new_crtc_state, i) {
453 		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
454 
455 		new_bw_state = intel_atomic_get_bw_state(state);
456 		if (IS_ERR(new_bw_state))
457 			return PTR_ERR(new_bw_state);
458 
459 		old_bw_state = intel_atomic_get_old_bw_state(state);
460 
461 		/*
462 		 * We store use_sagv_wm in the crtc state rather than relying on
463 		 * that bw state since we have no convenient way to get at the
464 		 * latter from the plane commit hooks (especially in the legacy
465 		 * cursor case).
466 		 *
467 		 * drm_atomic_check_only() gets upset if we pull more crtcs
468 		 * into the state, so we have to calculate this based on the
469 		 * individual intel_crtc_can_enable_sagv() rather than
470 		 * the overall intel_can_enable_sagv(). Otherwise the
471 		 * crtcs not included in the commit would not switch to the
472 		 * SAGV watermarks when we are about to enable SAGV, and that
473 		 * would lead to underruns. This does mean extra power draw
474 		 * when only a subset of the crtcs are blocking SAGV as the
475 		 * other crtcs can't be allowed to use the more optimal
476 		 * normal (ie. non-SAGV) watermarks.
477 		 */
478 		pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
479 			DISPLAY_VER(i915) >= 12 &&
480 			intel_crtc_can_enable_sagv(new_crtc_state);
481 
482 		if (intel_crtc_can_enable_sagv(new_crtc_state))
483 			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
484 		else
485 			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
486 	}
487 
488 	if (!new_bw_state)
489 		return 0;
490 
491 	new_bw_state->active_pipes =
492 		intel_calc_active_pipes(state, old_bw_state->active_pipes);
493 
494 	if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
495 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
496 		if (ret)
497 			return ret;
498 	}
499 
500 	if (intel_can_enable_sagv(i915, new_bw_state) !=
501 	    intel_can_enable_sagv(i915, old_bw_state)) {
502 		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
503 		if (ret)
504 			return ret;
505 	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
506 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
507 		if (ret)
508 			return ret;
509 	}
510 
511 	return 0;
512 }
513 
514 static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
515 			      u16 start, u16 end)
516 {
517 	entry->start = start;
518 	entry->end = end;
519 
520 	return end;
521 }
522 
523 static int intel_dbuf_slice_size(struct drm_i915_private *i915)
524 {
525 	return DISPLAY_INFO(i915)->dbuf.size /
526 		hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask);
527 }
528 
529 static void
530 skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
531 			 struct skl_ddb_entry *ddb)
532 {
533 	int slice_size = intel_dbuf_slice_size(i915);
534 
535 	if (!slice_mask) {
536 		ddb->start = 0;
537 		ddb->end = 0;
538 		return;
539 	}
540 
541 	ddb->start = (ffs(slice_mask) - 1) * slice_size;
542 	ddb->end = fls(slice_mask) * slice_size;
543 
544 	WARN_ON(ddb->start >= ddb->end);
545 	WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size);
546 }
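/*
 * Worked example for skl_ddb_entry_for_slices(), with hypothetical
 * numbers: a 4-slice DBuf of 4096 blocks gives a slice_size of 1024.
 * Assuming DBUF_S1 is bit 0, a slice_mask of BIT(DBUF_S2) | BIT(DBUF_S3)
 * (0b0110) then yields ddb->start = (ffs(0b0110) - 1) * 1024 = 1024 and
 * ddb->end = fls(0b0110) * 1024 = 3072, i.e. the contiguous range
 * covering slices S2 and S3.
 */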
547 
548 static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
549 {
550 	struct skl_ddb_entry ddb;
551 
552 	if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
553 		slice_mask = BIT(DBUF_S1);
554 	else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
555 		slice_mask = BIT(DBUF_S3);
556 
557 	skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
558 
559 	return ddb.start;
560 }
561 
562 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
563 			    const struct skl_ddb_entry *entry)
564 {
565 	int slice_size = intel_dbuf_slice_size(i915);
566 	enum dbuf_slice start_slice, end_slice;
567 	u8 slice_mask = 0;
568 
569 	if (!skl_ddb_entry_size(entry))
570 		return 0;
571 
572 	start_slice = entry->start / slice_size;
573 	end_slice = (entry->end - 1) / slice_size;
574 
575 	/*
576 	 * A per-plane DDB entry can in the worst case span multiple slices,
577 	 * but a single entry is always contiguous.
578 	 */
579 	while (start_slice <= end_slice) {
580 		slice_mask |= BIT(start_slice);
581 		start_slice++;
582 	}
583 
584 	return slice_mask;
585 }
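/*
 * Worked example for skl_ddb_dbuf_slice_mask(), with hypothetical
 * numbers: with 1024-block slices, an entry spanning blocks 900..1200
 * has start_slice = 900 / 1024 = 0 and end_slice = (1200 - 1) / 1024 = 1,
 * so the returned mask covers the first two slices even though the
 * entry itself is contiguous.
 */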
586 
587 static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
588 {
589 	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
590 	int hdisplay, vdisplay;
591 
592 	if (!crtc_state->hw.active)
593 		return 0;
594 
595 	/*
596 	 * The watermark/ddb requirement depends heavily on the width of the
597 	 * framebuffer, so instead of allocating DDB equally among pipes,
598 	 * distribute DDB based on the resolution/width of the display.
599 	 */
600 	drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
601 
602 	return hdisplay;
603 }
604 
605 static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
606 				    enum pipe for_pipe,
607 				    unsigned int *weight_start,
608 				    unsigned int *weight_end,
609 				    unsigned int *weight_total)
610 {
611 	struct drm_i915_private *i915 =
612 		to_i915(dbuf_state->base.state->base.dev);
613 	enum pipe pipe;
614 
615 	*weight_start = 0;
616 	*weight_end = 0;
617 	*weight_total = 0;
618 
619 	for_each_pipe(i915, pipe) {
620 		int weight = dbuf_state->weight[pipe];
621 
622 		/*
623 		 * Do not account for pipes using other slice sets.
624 		 * Luckily, as of the current BSpec, slice sets do not partially
625 		 * intersect (pipes share either the same single slice or the same
626 		 * slice set, i.e. no partial intersection), so it is enough to
627 		 * check for equality for now.
628 		 */
629 		if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
630 			continue;
631 
632 		*weight_total += weight;
633 		if (pipe < for_pipe) {
634 			*weight_start += weight;
635 			*weight_end += weight;
636 		} else if (pipe == for_pipe) {
637 			*weight_end += weight;
638 		}
639 	}
640 }
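/*
 * Worked example for intel_crtc_dbuf_weights(), with hypothetical
 * numbers: pipes A and B share one slice set with weights (hdisplay)
 * 1920 and 3840. For pipe B this yields weight_start = 1920,
 * weight_end = 5760 and weight_total = 5760, so skl_crtc_allocate_ddb()
 * below hands pipe B the upper 3840/5760 = 2/3 of that slice set's DDB
 * range.
 */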
641 
642 static int
643 skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
644 {
645 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
646 	unsigned int weight_total, weight_start, weight_end;
647 	const struct intel_dbuf_state *old_dbuf_state =
648 		intel_atomic_get_old_dbuf_state(state);
649 	struct intel_dbuf_state *new_dbuf_state =
650 		intel_atomic_get_new_dbuf_state(state);
651 	struct intel_crtc_state *crtc_state;
652 	struct skl_ddb_entry ddb_slices;
653 	enum pipe pipe = crtc->pipe;
654 	unsigned int mbus_offset = 0;
655 	u32 ddb_range_size;
656 	u32 dbuf_slice_mask;
657 	u32 start, end;
658 	int ret;
659 
660 	if (new_dbuf_state->weight[pipe] == 0) {
661 		skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
662 		goto out;
663 	}
664 
665 	dbuf_slice_mask = new_dbuf_state->slices[pipe];
666 
667 	skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
668 	mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
669 	ddb_range_size = skl_ddb_entry_size(&ddb_slices);
670 
671 	intel_crtc_dbuf_weights(new_dbuf_state, pipe,
672 				&weight_start, &weight_end, &weight_total);
673 
674 	start = ddb_range_size * weight_start / weight_total;
675 	end = ddb_range_size * weight_end / weight_total;
676 
677 	skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
678 			   ddb_slices.start - mbus_offset + start,
679 			   ddb_slices.start - mbus_offset + end);
680 
681 out:
682 	if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
683 	    skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
684 				&new_dbuf_state->ddb[pipe]))
685 		return 0;
686 
687 	ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
688 	if (ret)
689 		return ret;
690 
691 	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
692 	if (IS_ERR(crtc_state))
693 		return PTR_ERR(crtc_state);
694 
695 	/*
696 	 * Used for checking overlaps, so we need absolute
697 	 * offsets instead of MBUS relative offsets.
698 	 */
699 	crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
700 	crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
701 
702 	drm_dbg_kms(&i915->drm,
703 		    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
704 		    crtc->base.base.id, crtc->base.name,
705 		    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
706 		    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
707 		    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
708 		    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
709 
710 	return 0;
711 }
712 
713 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
714 				 int width, const struct drm_format_info *format,
715 				 u64 modifier, unsigned int rotation,
716 				 u32 plane_pixel_rate, struct skl_wm_params *wp,
717 				 int color_plane);
718 
719 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
720 				 struct intel_plane *plane,
721 				 int level,
722 				 unsigned int latency,
723 				 const struct skl_wm_params *wp,
724 				 const struct skl_wm_level *result_prev,
725 				 struct skl_wm_level *result /* out */);
726 
727 static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
728 				   const struct skl_wm_params *wp)
729 {
730 	unsigned int latency = i915->display.wm.skl_latency[level];
731 
732 	if (latency == 0)
733 		return 0;
734 
735 	/*
736 	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
737 	 * Display WA #1141: kbl,cfl
738 	 */
739 	if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
740 	    skl_watermark_ipc_enabled(i915))
741 		latency += 4;
742 
743 	if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
744 		latency += 15;
745 
746 	return latency;
747 }
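/*
 * Hypothetical example for skl_wm_latency(): a raw skl_latency[] value
 * of 20 us on a KBL machine with IPC enabled and an x-tiled plane
 * subject to the memory bandwidth WA becomes 20 + 4 + 15 = 39 us.
 */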
748 
749 static unsigned int
750 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
751 		      int num_active)
752 {
753 	struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
754 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
755 	struct skl_wm_level wm = {};
756 	int ret, min_ddb_alloc = 0;
757 	struct skl_wm_params wp;
758 	int level;
759 
760 	ret = skl_compute_wm_params(crtc_state, 256,
761 				    drm_format_info(DRM_FORMAT_ARGB8888),
762 				    DRM_FORMAT_MOD_LINEAR,
763 				    DRM_MODE_ROTATE_0,
764 				    crtc_state->pixel_rate, &wp, 0);
765 	drm_WARN_ON(&i915->drm, ret);
766 
767 	for (level = 0; level < i915->display.wm.num_levels; level++) {
768 		unsigned int latency = skl_wm_latency(i915, level, &wp);
769 
770 		skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
771 		if (wm.min_ddb_alloc == U16_MAX)
772 			break;
773 
774 		min_ddb_alloc = wm.min_ddb_alloc;
775 	}
776 
777 	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
778 }
779 
780 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
781 {
782 	skl_ddb_entry_init(entry,
783 			   REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
784 			   REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
785 	if (entry->end)
786 		entry->end++;
787 }
788 
789 static void
790 skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
791 			   const enum pipe pipe,
792 			   const enum plane_id plane_id,
793 			   struct skl_ddb_entry *ddb,
794 			   struct skl_ddb_entry *ddb_y)
795 {
796 	u32 val;
797 
798 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
799 	if (plane_id == PLANE_CURSOR) {
800 		val = intel_de_read(i915, CUR_BUF_CFG(pipe));
801 		skl_ddb_entry_init_from_hw(ddb, val);
802 		return;
803 	}
804 
805 	val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
806 	skl_ddb_entry_init_from_hw(ddb, val);
807 
808 	if (DISPLAY_VER(i915) >= 11)
809 		return;
810 
811 	val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
812 	skl_ddb_entry_init_from_hw(ddb_y, val);
813 }
814 
815 static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
816 				      struct skl_ddb_entry *ddb,
817 				      struct skl_ddb_entry *ddb_y)
818 {
819 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
820 	enum intel_display_power_domain power_domain;
821 	enum pipe pipe = crtc->pipe;
822 	intel_wakeref_t wakeref;
823 	enum plane_id plane_id;
824 
825 	power_domain = POWER_DOMAIN_PIPE(pipe);
826 	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
827 	if (!wakeref)
828 		return;
829 
830 	for_each_plane_id_on_crtc(crtc, plane_id)
831 		skl_ddb_get_hw_plane_state(i915, pipe,
832 					   plane_id,
833 					   &ddb[plane_id],
834 					   &ddb_y[plane_id]);
835 
836 	intel_display_power_put(i915, power_domain, wakeref);
837 }
838 
839 struct dbuf_slice_conf_entry {
840 	u8 active_pipes;
841 	u8 dbuf_mask[I915_MAX_PIPES];
842 	bool join_mbus;
843 };
844 
845 /*
846  * Table taken from Bspec 12716
847  * Pipes do have some preferred DBuf slice affinity,
848  * plus there are some hardcoded requirements on how
849  * those should be distributed for multipipe scenarios.
850  * With more DBuf slices the algorithm can get even more messy
851  * and less readable, so we decided to use a table almost
852  * as-is from BSpec itself - that way it is at least easier
853  * to compare, change and check.
854  */
855 static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
856 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
857 {
858 	{
859 		.active_pipes = BIT(PIPE_A),
860 		.dbuf_mask = {
861 			[PIPE_A] = BIT(DBUF_S1),
862 		},
863 	},
864 	{
865 		.active_pipes = BIT(PIPE_B),
866 		.dbuf_mask = {
867 			[PIPE_B] = BIT(DBUF_S1),
868 		},
869 	},
870 	{
871 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
872 		.dbuf_mask = {
873 			[PIPE_A] = BIT(DBUF_S1),
874 			[PIPE_B] = BIT(DBUF_S2),
875 		},
876 	},
877 	{
878 		.active_pipes = BIT(PIPE_C),
879 		.dbuf_mask = {
880 			[PIPE_C] = BIT(DBUF_S2),
881 		},
882 	},
883 	{
884 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
885 		.dbuf_mask = {
886 			[PIPE_A] = BIT(DBUF_S1),
887 			[PIPE_C] = BIT(DBUF_S2),
888 		},
889 	},
890 	{
891 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
892 		.dbuf_mask = {
893 			[PIPE_B] = BIT(DBUF_S1),
894 			[PIPE_C] = BIT(DBUF_S2),
895 		},
896 	},
897 	{
898 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
899 		.dbuf_mask = {
900 			[PIPE_A] = BIT(DBUF_S1),
901 			[PIPE_B] = BIT(DBUF_S1),
902 			[PIPE_C] = BIT(DBUF_S2),
903 		},
904 	},
905 	{}
906 };
907 
908 /*
909  * Table taken from Bspec 49255
910  * Pipes do have some preferred DBuf slice affinity,
911  * plus there are some hardcoded requirements on how
912  * those should be distributed for multipipe scenarios.
913  * With more DBuf slices the algorithm can get even more messy
914  * and less readable, so we decided to use a table almost
915  * as-is from BSpec itself - that way it is at least easier
916  * to compare, change and check.
917  */
918 static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
919 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
920 {
921 	{
922 		.active_pipes = BIT(PIPE_A),
923 		.dbuf_mask = {
924 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
925 		},
926 	},
927 	{
928 		.active_pipes = BIT(PIPE_B),
929 		.dbuf_mask = {
930 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
931 		},
932 	},
933 	{
934 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
935 		.dbuf_mask = {
936 			[PIPE_A] = BIT(DBUF_S2),
937 			[PIPE_B] = BIT(DBUF_S1),
938 		},
939 	},
940 	{
941 		.active_pipes = BIT(PIPE_C),
942 		.dbuf_mask = {
943 			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
944 		},
945 	},
946 	{
947 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
948 		.dbuf_mask = {
949 			[PIPE_A] = BIT(DBUF_S1),
950 			[PIPE_C] = BIT(DBUF_S2),
951 		},
952 	},
953 	{
954 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
955 		.dbuf_mask = {
956 			[PIPE_B] = BIT(DBUF_S1),
957 			[PIPE_C] = BIT(DBUF_S2),
958 		},
959 	},
960 	{
961 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
962 		.dbuf_mask = {
963 			[PIPE_A] = BIT(DBUF_S1),
964 			[PIPE_B] = BIT(DBUF_S1),
965 			[PIPE_C] = BIT(DBUF_S2),
966 		},
967 	},
968 	{
969 		.active_pipes = BIT(PIPE_D),
970 		.dbuf_mask = {
971 			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
972 		},
973 	},
974 	{
975 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
976 		.dbuf_mask = {
977 			[PIPE_A] = BIT(DBUF_S1),
978 			[PIPE_D] = BIT(DBUF_S2),
979 		},
980 	},
981 	{
982 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
983 		.dbuf_mask = {
984 			[PIPE_B] = BIT(DBUF_S1),
985 			[PIPE_D] = BIT(DBUF_S2),
986 		},
987 	},
988 	{
989 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
990 		.dbuf_mask = {
991 			[PIPE_A] = BIT(DBUF_S1),
992 			[PIPE_B] = BIT(DBUF_S1),
993 			[PIPE_D] = BIT(DBUF_S2),
994 		},
995 	},
996 	{
997 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
998 		.dbuf_mask = {
999 			[PIPE_C] = BIT(DBUF_S1),
1000 			[PIPE_D] = BIT(DBUF_S2),
1001 		},
1002 	},
1003 	{
1004 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1005 		.dbuf_mask = {
1006 			[PIPE_A] = BIT(DBUF_S1),
1007 			[PIPE_C] = BIT(DBUF_S2),
1008 			[PIPE_D] = BIT(DBUF_S2),
1009 		},
1010 	},
1011 	{
1012 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1013 		.dbuf_mask = {
1014 			[PIPE_B] = BIT(DBUF_S1),
1015 			[PIPE_C] = BIT(DBUF_S2),
1016 			[PIPE_D] = BIT(DBUF_S2),
1017 		},
1018 	},
1019 	{
1020 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1021 		.dbuf_mask = {
1022 			[PIPE_A] = BIT(DBUF_S1),
1023 			[PIPE_B] = BIT(DBUF_S1),
1024 			[PIPE_C] = BIT(DBUF_S2),
1025 			[PIPE_D] = BIT(DBUF_S2),
1026 		},
1027 	},
1028 	{}
1029 };
1030 
1031 static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
1032 	{
1033 		.active_pipes = BIT(PIPE_A),
1034 		.dbuf_mask = {
1035 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1036 		},
1037 	},
1038 	{
1039 		.active_pipes = BIT(PIPE_B),
1040 		.dbuf_mask = {
1041 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1042 		},
1043 	},
1044 	{
1045 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
1046 		.dbuf_mask = {
1047 			[PIPE_A] = BIT(DBUF_S1),
1048 			[PIPE_B] = BIT(DBUF_S2),
1049 		},
1050 	},
1051 	{
1052 		.active_pipes = BIT(PIPE_C),
1053 		.dbuf_mask = {
1054 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1055 		},
1056 	},
1057 	{
1058 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
1059 		.dbuf_mask = {
1060 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1061 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1062 		},
1063 	},
1064 	{
1065 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
1066 		.dbuf_mask = {
1067 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1068 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1069 		},
1070 	},
1071 	{
1072 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
1073 		.dbuf_mask = {
1074 			[PIPE_A] = BIT(DBUF_S1),
1075 			[PIPE_B] = BIT(DBUF_S2),
1076 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1077 		},
1078 	},
1079 	{
1080 		.active_pipes = BIT(PIPE_D),
1081 		.dbuf_mask = {
1082 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1083 		},
1084 	},
1085 	{
1086 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
1087 		.dbuf_mask = {
1088 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1089 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1090 		},
1091 	},
1092 	{
1093 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
1094 		.dbuf_mask = {
1095 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1096 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1097 		},
1098 	},
1099 	{
1100 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
1101 		.dbuf_mask = {
1102 			[PIPE_A] = BIT(DBUF_S1),
1103 			[PIPE_B] = BIT(DBUF_S2),
1104 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1105 		},
1106 	},
1107 	{
1108 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1109 		.dbuf_mask = {
1110 			[PIPE_C] = BIT(DBUF_S3),
1111 			[PIPE_D] = BIT(DBUF_S4),
1112 		},
1113 	},
1114 	{
1115 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1116 		.dbuf_mask = {
1117 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1118 			[PIPE_C] = BIT(DBUF_S3),
1119 			[PIPE_D] = BIT(DBUF_S4),
1120 		},
1121 	},
1122 	{
1123 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1124 		.dbuf_mask = {
1125 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1126 			[PIPE_C] = BIT(DBUF_S3),
1127 			[PIPE_D] = BIT(DBUF_S4),
1128 		},
1129 	},
1130 	{
1131 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1132 		.dbuf_mask = {
1133 			[PIPE_A] = BIT(DBUF_S1),
1134 			[PIPE_B] = BIT(DBUF_S2),
1135 			[PIPE_C] = BIT(DBUF_S3),
1136 			[PIPE_D] = BIT(DBUF_S4),
1137 		},
1138 	},
1139 	{}
1140 };
1141 
1142 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
1143 	/*
1144 	 * Keep the join_mbus cases first so check_mbus_joined()
1145 	 * will prefer them over the !join_mbus cases.
1146 	 */
1147 	{
1148 		.active_pipes = BIT(PIPE_A),
1149 		.dbuf_mask = {
1150 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1151 		},
1152 		.join_mbus = true,
1153 	},
1154 	{
1155 		.active_pipes = BIT(PIPE_B),
1156 		.dbuf_mask = {
1157 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1158 		},
1159 		.join_mbus = true,
1160 	},
1161 	{
1162 		.active_pipes = BIT(PIPE_A),
1163 		.dbuf_mask = {
1164 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1165 		},
1166 		.join_mbus = false,
1167 	},
1168 	{
1169 		.active_pipes = BIT(PIPE_B),
1170 		.dbuf_mask = {
1171 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1172 		},
1173 		.join_mbus = false,
1174 	},
1175 	{
1176 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
1177 		.dbuf_mask = {
1178 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1179 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1180 		},
1181 	},
1182 	{
1183 		.active_pipes = BIT(PIPE_C),
1184 		.dbuf_mask = {
1185 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1186 		},
1187 	},
1188 	{
1189 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
1190 		.dbuf_mask = {
1191 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1192 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1193 		},
1194 	},
1195 	{
1196 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
1197 		.dbuf_mask = {
1198 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1199 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1200 		},
1201 	},
1202 	{
1203 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
1204 		.dbuf_mask = {
1205 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1206 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1207 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1208 		},
1209 	},
1210 	{
1211 		.active_pipes = BIT(PIPE_D),
1212 		.dbuf_mask = {
1213 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1214 		},
1215 	},
1216 	{
1217 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
1218 		.dbuf_mask = {
1219 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1220 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1221 		},
1222 	},
1223 	{
1224 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
1225 		.dbuf_mask = {
1226 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1227 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1228 		},
1229 	},
1230 	{
1231 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
1232 		.dbuf_mask = {
1233 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1234 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1235 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1236 		},
1237 	},
1238 	{
1239 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1240 		.dbuf_mask = {
1241 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1242 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1243 		},
1244 	},
1245 	{
1246 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1247 		.dbuf_mask = {
1248 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1249 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1250 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1251 		},
1252 	},
1253 	{
1254 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1255 		.dbuf_mask = {
1256 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1257 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1258 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1259 		},
1260 	},
1261 	{
1262 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1263 		.dbuf_mask = {
1264 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1265 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1266 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1267 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1268 		},
1269 	},
1270 	{}
1271 
1272 };
1273 
1274 static bool check_mbus_joined(u8 active_pipes,
1275 			      const struct dbuf_slice_conf_entry *dbuf_slices)
1276 {
1277 	int i;
1278 
1279 	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1280 		if (dbuf_slices[i].active_pipes == active_pipes)
1281 			return dbuf_slices[i].join_mbus;
1282 	}
1283 	return false;
1284 }
1285 
1286 static bool adlp_check_mbus_joined(u8 active_pipes)
1287 {
1288 	return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
1289 }
1290 
1291 static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
1292 			      const struct dbuf_slice_conf_entry *dbuf_slices)
1293 {
1294 	int i;
1295 
1296 	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1297 		if (dbuf_slices[i].active_pipes == active_pipes &&
1298 		    dbuf_slices[i].join_mbus == join_mbus)
1299 			return dbuf_slices[i].dbuf_mask[pipe];
1300 	}
1301 	return 0;
1302 }
1303 
1304 /*
1305  * This function finds an entry with the same enabled pipe configuration and
1306  * returns the corresponding DBuf slice mask as stated in the BSpec for the
1307  * particular platform.
1308  */
1309 static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1310 {
1311 	/*
1312 	 * FIXME: For ICL this is still a bit unclear, as a previous BSpec revision
1313 	 * required calculating a "pipe ratio" in order to determine
1314 	 * whether one or two slices can be used for single pipe configurations,
1315 	 * as an additional constraint to the existing table.
1316 	 * However, based on recent info, it should not be a "pipe ratio"
1317 	 * but rather the ratio between pixel_rate and cdclk with additional
1318 	 * constants, so for now we are using only the table until this is
1319 	 * clarified. This is also why a crtc_state parameter used to be
1320 	 * kept here - we will need it once those additional constraints
1321 	 * pop up.
1322 	 */
1323 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1324 				   icl_allowed_dbufs);
1325 }
1326 
1327 static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1328 {
1329 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1330 				   tgl_allowed_dbufs);
1331 }
1332 
1333 static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1334 {
1335 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1336 				   adlp_allowed_dbufs);
1337 }
1338 
1339 static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1340 {
1341 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1342 				   dg2_allowed_dbufs);
1343 }
1344 
1345 static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
1346 {
1347 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1348 	enum pipe pipe = crtc->pipe;
1349 
1350 	if (IS_DG2(i915))
1351 		return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1352 	else if (DISPLAY_VER(i915) >= 13)
1353 		return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1354 	else if (DISPLAY_VER(i915) == 12)
1355 		return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1356 	else if (DISPLAY_VER(i915) == 11)
1357 		return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1358 	/*
1359 	 * For anything else just return one slice for now.
1360 	 * This should be extended for other platforms.
1361 	 */
1362 	return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
1363 }
1364 
1365 static bool
1366 use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
1367 		     struct intel_plane *plane)
1368 {
1369 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
1370 
1371 	return DISPLAY_VER(i915) >= 13 &&
1372 	       crtc_state->uapi.async_flip &&
1373 	       plane->async_flip;
1374 }
1375 
1376 static u64
1377 skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
1378 {
1379 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1380 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1381 	enum plane_id plane_id;
1382 	u64 data_rate = 0;
1383 
1384 	for_each_plane_id_on_crtc(crtc, plane_id) {
1385 		if (plane_id == PLANE_CURSOR)
1386 			continue;
1387 
1388 		data_rate += crtc_state->rel_data_rate[plane_id];
1389 
1390 		if (DISPLAY_VER(i915) < 11)
1391 			data_rate += crtc_state->rel_data_rate_y[plane_id];
1392 	}
1393 
1394 	return data_rate;
1395 }
1396 
1397 static const struct skl_wm_level *
1398 skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
1399 		   enum plane_id plane_id,
1400 		   int level)
1401 {
1402 	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1403 
1404 	if (level == 0 && pipe_wm->use_sagv_wm)
1405 		return &wm->sagv.wm0;
1406 
1407 	return &wm->wm[level];
1408 }
1409 
1410 static const struct skl_wm_level *
1411 skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
1412 		   enum plane_id plane_id)
1413 {
1414 	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1415 
1416 	if (pipe_wm->use_sagv_wm)
1417 		return &wm->sagv.trans_wm;
1418 
1419 	return &wm->trans_wm;
1420 }
1421 
1422 /*
1423  * We only disable the watermarks for each plane if
1424  * they exceed the ddb allocation of said plane. This
1425  * is done so that we don't end up touching cursor
1426  * watermarks needlessly when some other plane reduces
1427  * our max possible watermark level.
1428  *
1429  * Bspec has this to say about the PLANE_WM enable bit:
1430  * "All the watermarks at this level for all enabled
1431  *  planes must be enabled before the level will be used."
1432  * So this is actually safe to do.
1433  */
1434 static void
1435 skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
1436 {
1437 	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
1438 		memset(wm, 0, sizeof(*wm));
1439 }
1440 
1441 static void
1442 skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
1443 			const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
1444 {
1445 	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
1446 	    uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1447 		memset(wm, 0, sizeof(*wm));
1448 		memset(uv_wm, 0, sizeof(*uv_wm));
1449 	}
1450 }
1451 
1452 static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
1453 				const struct skl_plane_wm *wm)
1454 {
1455 	/*
1456 	 * Wa_1408961008:icl, ehl
1457 	 * Wa_14012656716:tgl, adl
1458 	 * Wa_14017887344:icl
1459 	 * Wa_14017868169:adl, tgl
1460 	 * Due to some power saving optimizations, different subsystems
1461 	 * like PSR might still use even disabled wm level registers
1462 	 * for "reference", so let's keep at least the values sane.
1463 	 * Considering the number of WAs requiring us to do similar things, it
1464 	 * was decided to simply do it for all platforms; as those wm
1465 	 * levels are disabled, this isn't going to do any harm anyway.
1466 	 */
1467 	return level > 0 && !wm->wm[level].enable;
1468 }
1469 
1470 struct skl_plane_ddb_iter {
1471 	u64 data_rate;
1472 	u16 start, size;
1473 };
1474 
1475 static void
1476 skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
1477 		       struct skl_ddb_entry *ddb,
1478 		       const struct skl_wm_level *wm,
1479 		       u64 data_rate)
1480 {
1481 	u16 size, extra = 0;
1482 
1483 	if (data_rate) {
1484 		extra = min_t(u16, iter->size,
1485 			      DIV64_U64_ROUND_UP(iter->size * data_rate,
1486 						 iter->data_rate));
1487 		iter->size -= extra;
1488 		iter->data_rate -= data_rate;
1489 	}
1490 
1491 	/*
1492 	 * Keep the ddb entries of all disabled planes explicitly zeroed
1493 	 * to avoid skl_ddb_add_affected_planes() adding them to
1494 	 * the state when other planes change their allocations.
1495 	 */
1496 	size = wm->min_ddb_alloc + extra;
1497 	if (size)
1498 		iter->start = skl_ddb_entry_init(ddb, iter->start,
1499 						 iter->start + size);
1500 }
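/*
 * Worked example for skl_allocate_plane_ddb(), with hypothetical
 * numbers: with iter->size = 100 leftover blocks, iter->data_rate =
 * 1000 and a plane contributing data_rate = 250, the plane receives
 * extra = 100 * 250 / 1000 = 25 blocks on top of wm->min_ddb_alloc,
 * and the iterator shrinks to size = 75, data_rate = 750 for the
 * remaining planes.
 */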
1501 
1502 static int
1503 skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
1504 			    struct intel_crtc *crtc)
1505 {
1506 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1507 	struct intel_crtc_state *crtc_state =
1508 		intel_atomic_get_new_crtc_state(state, crtc);
1509 	const struct intel_dbuf_state *dbuf_state =
1510 		intel_atomic_get_new_dbuf_state(state);
1511 	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
1512 	int num_active = hweight8(dbuf_state->active_pipes);
1513 	struct skl_plane_ddb_iter iter;
1514 	enum plane_id plane_id;
1515 	u16 cursor_size;
1516 	u32 blocks;
1517 	int level;
1518 
1519 	/* Clear the partitioning for disabled planes. */
1520 	memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
1521 	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
1522 
1523 	if (!crtc_state->hw.active)
1524 		return 0;
1525 
1526 	iter.start = alloc->start;
1527 	iter.size = skl_ddb_entry_size(alloc);
1528 	if (iter.size == 0)
1529 		return 0;
1530 
1531 	/* Allocate fixed number of blocks for cursor. */
1532 	cursor_size = skl_cursor_allocation(crtc_state, num_active);
1533 	iter.size -= cursor_size;
1534 	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
1535 			   alloc->end - cursor_size, alloc->end);
1536 
1537 	iter.data_rate = skl_total_relative_data_rate(crtc_state);
1538 
1539 	/*
1540 	 * Find the highest watermark level for which we can satisfy the block
1541 	 * requirement of active planes.
1542 	 */
1543 	for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
1544 		blocks = 0;
1545 		for_each_plane_id_on_crtc(crtc, plane_id) {
1546 			const struct skl_plane_wm *wm =
1547 				&crtc_state->wm.skl.optimal.planes[plane_id];
1548 
1549 			if (plane_id == PLANE_CURSOR) {
1550 				const struct skl_ddb_entry *ddb =
1551 					&crtc_state->wm.skl.plane_ddb[plane_id];
1552 
1553 				if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1554 					drm_WARN_ON(&i915->drm,
1555 						    wm->wm[level].min_ddb_alloc != U16_MAX);
1556 					blocks = U32_MAX;
1557 					break;
1558 				}
1559 				continue;
1560 			}
1561 
1562 			blocks += wm->wm[level].min_ddb_alloc;
1563 			blocks += wm->uv_wm[level].min_ddb_alloc;
1564 		}
1565 
1566 		if (blocks <= iter.size) {
1567 			iter.size -= blocks;
1568 			break;
1569 		}
1570 	}
1571 
1572 	if (level < 0) {
1573 		drm_dbg_kms(&i915->drm,
1574 			    "Requested display configuration exceeds system DDB limitations");
1575 		drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
1576 			    blocks, iter.size);
1577 		return -EINVAL;
1578 	}
1579 
1580 	/* avoid the WARN later when we don't allocate any extra DDB */
1581 	if (iter.data_rate == 0)
1582 		iter.size = 0;
1583 
1584 	/*
1585 	 * Grant each plane the blocks it requires at the highest achievable
1586 	 * watermark level, plus an extra share of the leftover blocks
1587 	 * proportional to its relative data rate.
1588 	 */
1589 	for_each_plane_id_on_crtc(crtc, plane_id) {
1590 		struct skl_ddb_entry *ddb =
1591 			&crtc_state->wm.skl.plane_ddb[plane_id];
1592 		struct skl_ddb_entry *ddb_y =
1593 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
1594 		const struct skl_plane_wm *wm =
1595 			&crtc_state->wm.skl.optimal.planes[plane_id];
1596 
1597 		if (plane_id == PLANE_CURSOR)
1598 			continue;
1599 
1600 		if (DISPLAY_VER(i915) < 11 &&
1601 		    crtc_state->nv12_planes & BIT(plane_id)) {
1602 			skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
1603 					       crtc_state->rel_data_rate_y[plane_id]);
1604 			skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
1605 					       crtc_state->rel_data_rate[plane_id]);
1606 		} else {
1607 			skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
1608 					       crtc_state->rel_data_rate[plane_id]);
1609 		}
1610 	}
1611 	drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
1612 
1613 	/*
1614 	 * When we calculated watermark values we didn't know how high
1615 	 * of a level we'd actually be able to hit, so we just marked
1616 	 * all levels as "enabled."  Go back now and disable the ones
1617 	 * that aren't actually possible.
1618 	 */
1619 	for (level++; level < i915->display.wm.num_levels; level++) {
1620 		for_each_plane_id_on_crtc(crtc, plane_id) {
1621 			const struct skl_ddb_entry *ddb =
1622 				&crtc_state->wm.skl.plane_ddb[plane_id];
1623 			const struct skl_ddb_entry *ddb_y =
1624 				&crtc_state->wm.skl.plane_ddb_y[plane_id];
1625 			struct skl_plane_wm *wm =
1626 				&crtc_state->wm.skl.optimal.planes[plane_id];
1627 
1628 			if (DISPLAY_VER(i915) < 11 &&
1629 			    crtc_state->nv12_planes & BIT(plane_id))
1630 				skl_check_nv12_wm_level(&wm->wm[level],
1631 							&wm->uv_wm[level],
1632 							ddb_y, ddb);
1633 			else
1634 				skl_check_wm_level(&wm->wm[level], ddb);
1635 
1636 			if (skl_need_wm_copy_wa(i915, level, wm)) {
1637 				wm->wm[level].blocks = wm->wm[level - 1].blocks;
1638 				wm->wm[level].lines = wm->wm[level - 1].lines;
1639 				wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
1640 			}
1641 		}
1642 	}
1643 
1644 	/*
1645 	 * Go back and disable the transition and SAGV watermarks
1646 	 * if it turns out we don't have enough DDB blocks for them.
1647 	 */
1648 	for_each_plane_id_on_crtc(crtc, plane_id) {
1649 		const struct skl_ddb_entry *ddb =
1650 			&crtc_state->wm.skl.plane_ddb[plane_id];
1651 		const struct skl_ddb_entry *ddb_y =
1652 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
1653 		struct skl_plane_wm *wm =
1654 			&crtc_state->wm.skl.optimal.planes[plane_id];
1655 
1656 		if (DISPLAY_VER(i915) < 11 &&
1657 		    crtc_state->nv12_planes & BIT(plane_id)) {
1658 			skl_check_wm_level(&wm->trans_wm, ddb_y);
1659 		} else {
1660 			WARN_ON(skl_ddb_entry_size(ddb_y));
1661 
1662 			skl_check_wm_level(&wm->trans_wm, ddb);
1663 		}
1664 
1665 		skl_check_wm_level(&wm->sagv.wm0, ddb);
1666 		skl_check_wm_level(&wm->sagv.trans_wm, ddb);
1667 	}
1668 
1669 	return 0;
1670 }
1671 
1672 /*
1673  * The max latency should be 257 (max the punit can code is 255 and we add 2us
1674  * for the read latency) and cpp should always be <= 8, so that
1675  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
1676  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
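 * (Sanity check on the u32 intermediate in skl_wm_method1(), assuming
 * pixel_rate is in kHz: 257 us * 2,000,000 kHz * 8 cpp ≈ 4.1e9, which
 * still fits in a u32.)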
1677  */
1678 static uint_fixed_16_16_t
1679 skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
1680 	       u8 cpp, u32 latency, u32 dbuf_block_size)
1681 {
1682 	u32 wm_intermediate_val;
1683 	uint_fixed_16_16_t ret;
1684 
1685 	if (latency == 0)
1686 		return FP_16_16_MAX;
1687 
1688 	wm_intermediate_val = latency * pixel_rate * cpp;
1689 	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
1690 
1691 	if (DISPLAY_VER(i915) >= 10)
1692 		ret = add_fixed16_u32(ret, 1);
1693 
1694 	return ret;
1695 }
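/*
 * In other words, method 1 is
 *   blocks = latency(us) * pixel_rate(kHz) * cpp / (1000 * dbuf_block_size)
 * (plus one extra block on display version 10+), i.e. the number of
 * DBuf blocks the plane fetches during the memory latency window.
 * Hypothetical example: 30 us * 300,000 kHz * 4 cpp / (1000 * 512) is
 * roughly 70.3 blocks.
 */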
1696 
1697 static uint_fixed_16_16_t
1698 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
1699 	       uint_fixed_16_16_t plane_blocks_per_line)
1700 {
1701 	u32 wm_intermediate_val;
1702 	uint_fixed_16_16_t ret;
1703 
1704 	if (latency == 0)
1705 		return FP_16_16_MAX;
1706 
1707 	wm_intermediate_val = latency * pixel_rate;
1708 	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
1709 					   pipe_htotal * 1000);
1710 	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
1711 	return ret;
1712 }
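/*
 * Method 2 works in whole lines instead:
 *   lines  = DIV_ROUND_UP(latency(us) * pixel_rate(kHz), pipe_htotal * 1000)
 *   blocks = lines * plane_blocks_per_line
 * i.e. how many scanlines fit into the latency window, times the DBuf
 * blocks needed per line. Hypothetical example: latency = 30 us,
 * pixel_rate = 300,000 kHz and pipe_htotal = 2200 give lines =
 * DIV_ROUND_UP(9,000,000, 2,200,000) = 5.
 */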
1713 
1714 static uint_fixed_16_16_t
1715 intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
1716 {
1717 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1718 	u32 pixel_rate;
1719 	u32 crtc_htotal;
1720 	uint_fixed_16_16_t linetime_us;
1721 
1722 	if (!crtc_state->hw.active)
1723 		return u32_to_fixed16(0);
1724 
1725 	pixel_rate = crtc_state->pixel_rate;
1726 
1727 	if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
1728 		return u32_to_fixed16(0);
1729 
1730 	crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
1731 	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
1732 
1733 	return linetime_us;
1734 }
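/*
 * Hypothetical example for intel_get_linetime_us(): crtc_htotal = 2200
 * and pixel_rate = 300,000 kHz give linetime_us = 2200 * 1000 / 300,000,
 * i.e. roughly 7.33 us per scanline.
 */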
1735 
1736 static int
1737 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
1738 		      int width, const struct drm_format_info *format,
1739 		      u64 modifier, unsigned int rotation,
1740 		      u32 plane_pixel_rate, struct skl_wm_params *wp,
1741 		      int color_plane)
1742 {
1743 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1744 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1745 	u32 interm_pbpl;
1746 
1747 	/* only planar formats have two planes */
1748 	if (color_plane == 1 &&
1749 	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
1750 		drm_dbg_kms(&i915->drm,
1751 			    "Non planar formats have a single plane\n");
1752 		return -EINVAL;
1753 	}
1754 
1755 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
1756 	wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
1757 		intel_fb_is_tiled_modifier(modifier);
1758 	wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
1759 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
1760 
1761 	wp->width = width;
1762 	if (color_plane == 1 && wp->is_planar)
1763 		wp->width /= 2;
1764 
1765 	wp->cpp = format->cpp[color_plane];
1766 	wp->plane_pixel_rate = plane_pixel_rate;
1767 
1768 	if (DISPLAY_VER(i915) >= 11 &&
1769 	    modifier == I915_FORMAT_MOD_Yf_TILED  && wp->cpp == 1)
1770 		wp->dbuf_block_size = 256;
1771 	else
1772 		wp->dbuf_block_size = 512;
1773 
1774 	if (drm_rotation_90_or_270(rotation)) {
1775 		switch (wp->cpp) {
1776 		case 1:
1777 			wp->y_min_scanlines = 16;
1778 			break;
1779 		case 2:
1780 			wp->y_min_scanlines = 8;
1781 			break;
1782 		case 4:
1783 			wp->y_min_scanlines = 4;
1784 			break;
1785 		default:
1786 			MISSING_CASE(wp->cpp);
1787 			return -EINVAL;
1788 		}
1789 	} else {
1790 		wp->y_min_scanlines = 4;
1791 	}
1792 
1793 	if (skl_needs_memory_bw_wa(i915))
1794 		wp->y_min_scanlines *= 2;
1795 
1796 	wp->plane_bytes_per_line = wp->width * wp->cpp;
1797 	if (wp->y_tiled) {
1798 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
1799 					   wp->y_min_scanlines,
1800 					   wp->dbuf_block_size);
1801 
1802 		if (DISPLAY_VER(i915) >= 10)
1803 			interm_pbpl++;
1804 
1805 		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
1806 							wp->y_min_scanlines);
1807 	} else {
1808 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
1809 					   wp->dbuf_block_size);
1810 
1811 		if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
1812 			interm_pbpl++;
1813 
1814 		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
1815 	}
1816 
1817 	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
1818 					     wp->plane_blocks_per_line);
1819 
1820 	wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
1821 
1822 	return 0;
1823 }
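/*
 * Hypothetical example for skl_compute_wm_params(): a 3840-wide, 4 cpp,
 * Y-tiled plane with y_min_scanlines = 4 and dbuf_block_size = 512 has
 * plane_bytes_per_line = 15360, interm_pbpl = DIV_ROUND_UP(15360 * 4, 512)
 * + 1 = 121 on display version 10+, plane_blocks_per_line = 121 / 4 =
 * 30.25 in fixed point, and y_tile_minimum = 4 * 30.25 = 121 blocks.
 */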
1824 
1825 static int
1826 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
1827 			    const struct intel_plane_state *plane_state,
1828 			    struct skl_wm_params *wp, int color_plane)
1829 {
1830 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1831 	int width;
1832 
1833 	/*
1834 	 * Src coordinates are already rotated by 270 degrees for
1835 	 * the 90/270 degree plane rotation cases (to match the
1836 	 * GTT mapping), hence no need to account for rotation here.
1837 	 */
1838 	width = drm_rect_width(&plane_state->uapi.src) >> 16;
1839 
1840 	return skl_compute_wm_params(crtc_state, width,
1841 				     fb->format, fb->modifier,
1842 				     plane_state->hw.rotation,
1843 				     intel_plane_pixel_rate(crtc_state, plane_state),
1844 				     wp, color_plane);
1845 }
1846 
1847 static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
1848 {
1849 	if (DISPLAY_VER(i915) >= 10)
1850 		return true;
1851 
1852 	/* The number of lines is ignored for the level 0 watermark. */
1853 	return level > 0;
1854 }
1855 
1856 static int skl_wm_max_lines(struct drm_i915_private *i915)
1857 {
1858 	if (DISPLAY_VER(i915) >= 13)
1859 		return 255;
1860 	else
1861 		return 31;
1862 }
1863 
1864 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
1865 				 struct intel_plane *plane,
1866 				 int level,
1867 				 unsigned int latency,
1868 				 const struct skl_wm_params *wp,
1869 				 const struct skl_wm_level *result_prev,
1870 				 struct skl_wm_level *result /* out */)
1871 {
1872 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1873 	uint_fixed_16_16_t method1, method2;
1874 	uint_fixed_16_16_t selected_result;
1875 	u32 blocks, lines, min_ddb_alloc = 0;
1876 
1877 	if (latency == 0 ||
1878 	    (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
1879 		/* reject it */
1880 		result->min_ddb_alloc = U16_MAX;
1881 		return;
1882 	}
1883 
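	/*
	 * Roughly speaking, method 1 sizes the watermark by the number of
	 * blocks the plane fetches during the memory latency at its pixel
	 * rate, while method 2 rounds that up to whole lines (latency /
	 * linetime lines worth of plane_blocks_per_line). See
	 * skl_wm_method1/skl_wm_method2.
	 */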
1884 	method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
1885 				 wp->cpp, latency, wp->dbuf_block_size);
1886 	method2 = skl_wm_method2(wp->plane_pixel_rate,
1887 				 crtc_state->hw.pipe_mode.crtc_htotal,
1888 				 latency,
1889 				 wp->plane_blocks_per_line);
1890 
1891 	if (wp->y_tiled) {
1892 		selected_result = max_fixed16(method2, wp->y_tile_minimum);
1893 	} else {
1894 		if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
1895 		     wp->dbuf_block_size < 1) &&
1896 		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
1897 			selected_result = method2;
1898 		} else if (latency >= wp->linetime_us) {
1899 			if (DISPLAY_VER(i915) == 9)
1900 				selected_result = min_fixed16(method1, method2);
1901 			else
1902 				selected_result = method2;
1903 		} else {
1904 			selected_result = method1;
1905 		}
1906 	}
1907 
1908 	blocks = fixed16_to_u32_round_up(selected_result) + 1;
1909 	/*
1910 	 * Let's keep blocks at a minimum equivalent to plane_blocks_per_line,
1911 	 * as the lines configuration will always cover at least one line.
1912 	 * This is a workaround for FIFO underruns observed with resolutions
1913 	 * like 4k 60 Hz in single channel DRAM configurations.
1914 	 *
1915 	 * As per Bspec 49325, if the ddb allocation can hold at least
1916 	 * one plane_blocks_per_line, we should have selected method2 in
1917 	 * the logic above. Assuming that modern versions have enough dbuf
1918 	 * and that method2 guarantees blocks equivalent to at least one line,
1919 	 * raise blocks to at least plane_blocks_per_line.
1920 	 *
1921 	 * TODO: Revisit the logic when we have a better understanding of the
1922 	 * DRAM channels' impact on the level 0 memory latency and the
1923 	 * relevant wm calculations.
1924 	 */
1925 	if (skl_wm_has_lines(i915, level))
1926 		blocks = max(blocks,
1927 			     fixed16_to_u32_round_up(wp->plane_blocks_per_line));
1928 	lines = div_round_up_fixed16(selected_result,
1929 				     wp->plane_blocks_per_line);
1930 
1931 	if (DISPLAY_VER(i915) == 9) {
1932 		/* Display WA #1125: skl,bxt,kbl */
1933 		if (level == 0 && wp->rc_surface)
1934 			blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1935 
1936 		/* Display WA #1126: skl,bxt,kbl */
1937 		if (level >= 1 && level <= 7) {
1938 			if (wp->y_tiled) {
1939 				blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1940 				lines += wp->y_min_scanlines;
1941 			} else {
1942 				blocks++;
1943 			}
1944 
1945 			/*
1946 			 * Make sure result blocks for higher latency levels are
1947 			 * at least as high as those of the level below. This is
1948 			 * an assumption made by the DDB algorithm optimization
1949 			 * for special cases. Also covers Display WA #1125 for RC.
1950 			 */
1951 			if (result_prev->blocks > blocks)
1952 				blocks = result_prev->blocks;
1953 		}
1954 	}
1955 
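	/*
	 * icl+ minimum DDB allocation: for Y-tiled surfaces round the line
	 * count up to the next multiple of y_min_scanlines plus one more
	 * group (e.g. lines=13, y_min_scanlines=4 -> 20 lines) and convert
	 * back to blocks; for everything else add a ~10% margin on top of
	 * the block count.
	 */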
1956 	if (DISPLAY_VER(i915) >= 11) {
1957 		if (wp->y_tiled) {
1958 			int extra_lines;
1959 
1960 			if (lines % wp->y_min_scanlines == 0)
1961 				extra_lines = wp->y_min_scanlines;
1962 			else
1963 				extra_lines = wp->y_min_scanlines * 2 -
1964 					lines % wp->y_min_scanlines;
1965 
1966 			min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
1967 								 wp->plane_blocks_per_line);
1968 		} else {
1969 			min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
1970 		}
1971 	}
1972 
1973 	if (!skl_wm_has_lines(i915, level))
1974 		lines = 0;
1975 
1976 	if (lines > skl_wm_max_lines(i915)) {
1977 		/* reject it */
1978 		result->min_ddb_alloc = U16_MAX;
1979 		return;
1980 	}
1981 
1982 	/*
1983 	 * If lines is valid, assume we can use this watermark level
1984 	 * for now.  We'll come back and disable it after we calculate the
1985 	 * DDB allocation if it turns out we don't actually have enough
1986 	 * blocks to satisfy it.
1987 	 */
1988 	result->blocks = blocks;
1989 	result->lines = lines;
1990 	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
1991 	result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
1992 	result->enable = true;
1993 
1994 	if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
1995 		result->can_sagv = latency >= i915->display.sagv.block_time_us;
1996 }
1997 
1998 static void
1999 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
2000 		      struct intel_plane *plane,
2001 		      const struct skl_wm_params *wm_params,
2002 		      struct skl_wm_level *levels)
2003 {
2004 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2005 	struct skl_wm_level *result_prev = &levels[0];
2006 	int level;
2007 
2008 	for (level = 0; level < i915->display.wm.num_levels; level++) {
2009 		struct skl_wm_level *result = &levels[level];
2010 		unsigned int latency = skl_wm_latency(i915, level, wm_params);
2011 
2012 		skl_compute_plane_wm(crtc_state, plane, level, latency,
2013 				     wm_params, result_prev, result);
2014 
2015 		result_prev = result;
2016 	}
2017 }
2018 
2019 static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
2020 				struct intel_plane *plane,
2021 				const struct skl_wm_params *wm_params,
2022 				struct skl_plane_wm *plane_wm)
2023 {
2024 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2025 	struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
2026 	struct skl_wm_level *levels = plane_wm->wm;
2027 	unsigned int latency = 0;
2028 
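	/*
	 * The SAGV wm0 is simply the normal wm0 recomputed with the SAGV
	 * block time added on top of the level 0 memory latency.
	 */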
2029 	if (i915->display.sagv.block_time_us)
2030 		latency = i915->display.sagv.block_time_us +
2031 			skl_wm_latency(i915, 0, wm_params);
2032 
2033 	skl_compute_plane_wm(crtc_state, plane, 0, latency,
2034 			     wm_params, &levels[0],
2035 			     sagv_wm);
2036 }
2037 
2038 static void skl_compute_transition_wm(struct drm_i915_private *i915,
2039 				      struct skl_wm_level *trans_wm,
2040 				      const struct skl_wm_level *wm0,
2041 				      const struct skl_wm_params *wp)
2042 {
2043 	u16 trans_min, trans_amount, trans_y_tile_min;
2044 	u16 wm0_blocks, trans_offset, blocks;
2045 
2046 	/* Transition WMs don't make any sense if IPC is disabled */
2047 	if (!skl_watermark_ipc_enabled(i915))
2048 		return;
2049 
2050 	/*
2051 	 * WaDisableTWM:skl,kbl,cfl,bxt
2052 	 * Transition WMs are not recommended by the HW team for GEN9
2053 	 */
2054 	if (DISPLAY_VER(i915) == 9)
2055 		return;
2056 
2057 	if (DISPLAY_VER(i915) >= 11)
2058 		trans_min = 4;
2059 	else
2060 		trans_min = 14;
2061 
2062 	/* Display WA #1140: glk,cnl */
2063 	if (DISPLAY_VER(i915) == 10)
2064 		trans_amount = 0;
2065 	else
2066 		trans_amount = 10; /* This is a configurable amount */
2067 
2068 	trans_offset = trans_min + trans_amount;
2069 
2070 	/*
2071 	 * The spec asks for Selected Result Blocks for wm0 (the real value),
2072 	 * not Result Blocks (the integer value). Pay attention to the capital
2073 	 * letters. The value wm0->blocks is actually Result Blocks, but
2074 	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
2075 	 * and since we later will have to get the ceiling of the sum in the
2076 	 * transition watermarks calculation, we can just pretend Selected
2077 	 * Result Blocks is Result Blocks minus 1 and it should work for the
2078 	 * current platforms.
2079 	 */
2080 	wm0_blocks = wm0->blocks - 1;
2081 
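	/*
	 * blocks = max(wm0_blocks, 2 * y_tile_minimum for Y-tiled) +
	 * trans_offset, plus one more block for the ceiling mentioned
	 * above. E.g. icl+ with a linear surface: wm0_blocks + 4 + 10 + 1.
	 */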
2082 	if (wp->y_tiled) {
2083 		trans_y_tile_min =
2084 			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
2085 		blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
2086 	} else {
2087 		blocks = wm0_blocks + trans_offset;
2088 	}
2089 	blocks++;
2090 
2091 	/*
2092 	 * Just assume we can enable the transition watermark.  After
2093 	 * computing the DDB we'll come back and disable it if that
2094 	 * assumption turns out to be false.
2095 	 */
2096 	trans_wm->blocks = blocks;
2097 	trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
2098 	trans_wm->enable = true;
2099 }
2100 
2101 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
2102 				     const struct intel_plane_state *plane_state,
2103 				     struct intel_plane *plane, int color_plane)
2104 {
2105 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2106 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2107 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2108 	struct skl_wm_params wm_params;
2109 	int ret;
2110 
2111 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2112 					  &wm_params, color_plane);
2113 	if (ret)
2114 		return ret;
2115 
2116 	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
2117 
2118 	skl_compute_transition_wm(i915, &wm->trans_wm,
2119 				  &wm->wm[0], &wm_params);
2120 
2121 	if (DISPLAY_VER(i915) >= 12) {
2122 		tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
2123 
2124 		skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
2125 					  &wm->sagv.wm0, &wm_params);
2126 	}
2127 
2128 	return 0;
2129 }
2130 
2131 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
2132 				 const struct intel_plane_state *plane_state,
2133 				 struct intel_plane *plane)
2134 {
2135 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2136 	struct skl_wm_params wm_params;
2137 	int ret;
2138 
2139 	wm->is_planar = true;
2140 
2141 	/* uv plane watermarks must also be validated for NV12/Planar */
2142 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2143 					  &wm_params, 1);
2144 	if (ret)
2145 		return ret;
2146 
2147 	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
2148 
2149 	return 0;
2150 }
2151 
2152 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
2153 			      const struct intel_plane_state *plane_state)
2154 {
2155 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2156 	enum plane_id plane_id = plane->id;
2157 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2158 	const struct drm_framebuffer *fb = plane_state->hw.fb;
2159 	int ret;
2160 
2161 	memset(wm, 0, sizeof(*wm));
2162 
2163 	if (!intel_wm_plane_visible(crtc_state, plane_state))
2164 		return 0;
2165 
2166 	ret = skl_build_plane_wm_single(crtc_state, plane_state,
2167 					plane, 0);
2168 	if (ret)
2169 		return ret;
2170 
2171 	if (fb->format->is_yuv && fb->format->num_planes > 1) {
2172 		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
2173 					    plane);
2174 		if (ret)
2175 			return ret;
2176 	}
2177 
2178 	return 0;
2179 }
2180 
2181 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
2182 			      const struct intel_plane_state *plane_state)
2183 {
2184 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2185 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2186 	enum plane_id plane_id = plane->id;
2187 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2188 	int ret;
2189 
2190 	/* Watermarks calculated in master */
2191 	if (plane_state->planar_slave)
2192 		return 0;
2193 
2194 	memset(wm, 0, sizeof(*wm));
2195 
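	/*
	 * For planar formats the master plane carries the watermarks for
	 * both hw planes: the Y watermarks are computed for the linked
	 * (slave) plane using color plane 0, and the UV watermarks for
	 * this plane using color plane 1.
	 */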
2196 	if (plane_state->planar_linked_plane) {
2197 		const struct drm_framebuffer *fb = plane_state->hw.fb;
2198 
2199 		drm_WARN_ON(&i915->drm,
2200 			    !intel_wm_plane_visible(crtc_state, plane_state));
2201 		drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
2202 			    fb->format->num_planes == 1);
2203 
2204 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2205 						plane_state->planar_linked_plane, 0);
2206 		if (ret)
2207 			return ret;
2208 
2209 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2210 						plane, 1);
2211 		if (ret)
2212 			return ret;
2213 	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
2214 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2215 						plane, 0);
2216 		if (ret)
2217 			return ret;
2218 	}
2219 
2220 	return 0;
2221 }
2222 
2223 static bool
2224 skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
2225 			int wm0_lines, int latency)
2226 {
2227 	const struct drm_display_mode *adjusted_mode =
2228 		&crtc_state->hw.adjusted_mode;
2229 
2230 	/* FIXME missing scaler and DSC pre-fill time */
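	/*
	 * Roughly: the frame start delay, the latency expressed in
	 * scanlines, and the wm0 line count must all fit within the
	 * vblank length (crtc_vtotal - crtc_vblank_start) for the
	 * watermark to be usable.
	 */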
2231 	return crtc_state->framestart_delay +
2232 		intel_usecs_to_scanlines(adjusted_mode, latency) +
2233 		wm0_lines >
2234 		adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
2235 }
2236 
2237 static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
2238 {
2239 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2240 	enum plane_id plane_id;
2241 	int wm0_lines = 0;
2242 
2243 	for_each_plane_id_on_crtc(crtc, plane_id) {
2244 		const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
2245 
2246 		/* FIXME what about !skl_wm_has_lines() platforms? */
2247 		wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines);
2248 	}
2249 
2250 	return wm0_lines;
2251 }
2252 
2253 static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
2254 				       int wm0_lines)
2255 {
2256 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2257 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2258 	int level;
2259 
2260 	for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
2261 		int latency;
2262 
2263 		/* FIXME should we care about the latency w/a's? */
2264 		latency = skl_wm_latency(i915, level, NULL);
2265 		if (latency == 0)
2266 			continue;
2267 
2268 		/* FIXME is it correct to use 0 latency for wm0 here? */
2269 		if (level == 0)
2270 			latency = 0;
2271 
2272 		if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency))
2273 			return level;
2274 	}
2275 
2276 	return -EINVAL;
2277 }
2278 
2279 static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
2280 {
2281 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2282 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2283 	int wm0_lines, level;
2284 
2285 	if (!crtc_state->hw.active)
2286 		return 0;
2287 
2288 	wm0_lines = skl_max_wm0_lines(crtc_state);
2289 
2290 	level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines);
2291 	if (level < 0)
2292 		return level;
2293 
2294 	/*
2295 	 * PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
2296 	 * based on whether we're limited by the vblank duration.
2297 	 */
2298 	crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1;
2299 
2300 	for (level++; level < i915->display.wm.num_levels; level++) {
2301 		enum plane_id plane_id;
2302 
2303 		for_each_plane_id_on_crtc(crtc, plane_id) {
2304 			struct skl_plane_wm *wm =
2305 				&crtc_state->wm.skl.optimal.planes[plane_id];
2306 
2307 			/*
2308 			 * FIXME just clear enable or flag the entire
2309 			 * thing as bad via min_ddb_alloc=U16_MAX?
2310 			 */
2311 			wm->wm[level].enable = false;
2312 			wm->uv_wm[level].enable = false;
2313 		}
2314 	}
2315 
2316 	if (DISPLAY_VER(i915) >= 12 &&
2317 	    i915->display.sagv.block_time_us &&
2318 	    skl_is_vblank_too_short(crtc_state, wm0_lines,
2319 				    i915->display.sagv.block_time_us)) {
2320 		enum plane_id plane_id;
2321 
2322 		for_each_plane_id_on_crtc(crtc, plane_id) {
2323 			struct skl_plane_wm *wm =
2324 				&crtc_state->wm.skl.optimal.planes[plane_id];
2325 
2326 			wm->sagv.wm0.enable = false;
2327 			wm->sagv.trans_wm.enable = false;
2328 		}
2329 	}
2330 
2331 	return 0;
2332 }
2333 
2334 static int skl_build_pipe_wm(struct intel_atomic_state *state,
2335 			     struct intel_crtc *crtc)
2336 {
2337 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2338 	struct intel_crtc_state *crtc_state =
2339 		intel_atomic_get_new_crtc_state(state, crtc);
2340 	const struct intel_plane_state *plane_state;
2341 	struct intel_plane *plane;
2342 	int ret, i;
2343 
2344 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2345 		/*
2346 		 * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
2347 		 * instead but we don't populate that correctly for NV12 Y
2348 		 * planes so for now hack this.
2349 		 */
2350 		if (plane->pipe != crtc->pipe)
2351 			continue;
2352 
2353 		if (DISPLAY_VER(i915) >= 11)
2354 			ret = icl_build_plane_wm(crtc_state, plane_state);
2355 		else
2356 			ret = skl_build_plane_wm(crtc_state, plane_state);
2357 		if (ret)
2358 			return ret;
2359 	}
2360 
2361 	crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
2362 
2363 	return skl_wm_check_vblank(crtc_state);
2364 }
2365 
2366 static void skl_ddb_entry_write(struct drm_i915_private *i915,
2367 				i915_reg_t reg,
2368 				const struct skl_ddb_entry *entry)
2369 {
2370 	if (entry->end)
2371 		intel_de_write_fw(i915, reg,
2372 				  PLANE_BUF_END(entry->end - 1) |
2373 				  PLANE_BUF_START(entry->start));
2374 	else
2375 		intel_de_write_fw(i915, reg, 0);
2376 }
2377 
2378 static void skl_write_wm_level(struct drm_i915_private *i915,
2379 			       i915_reg_t reg,
2380 			       const struct skl_wm_level *level)
2381 {
2382 	u32 val = 0;
2383 
2384 	if (level->enable)
2385 		val |= PLANE_WM_EN;
2386 	if (level->ignore_lines)
2387 		val |= PLANE_WM_IGNORE_LINES;
2388 	val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
2389 	val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
2390 
2391 	intel_de_write_fw(i915, reg, val);
2392 }
2393 
2394 void skl_write_plane_wm(struct intel_plane *plane,
2395 			const struct intel_crtc_state *crtc_state)
2396 {
2397 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2398 	enum plane_id plane_id = plane->id;
2399 	enum pipe pipe = plane->pipe;
2400 	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2401 	const struct skl_ddb_entry *ddb =
2402 		&crtc_state->wm.skl.plane_ddb[plane_id];
2403 	const struct skl_ddb_entry *ddb_y =
2404 		&crtc_state->wm.skl.plane_ddb_y[plane_id];
2405 	int level;
2406 
2407 	for (level = 0; level < i915->display.wm.num_levels; level++)
2408 		skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
2409 				   skl_plane_wm_level(pipe_wm, plane_id, level));
2410 
2411 	skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
2412 			   skl_plane_trans_wm(pipe_wm, plane_id));
2413 
2414 	if (HAS_HW_SAGV_WM(i915)) {
2415 		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2416 
2417 		skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
2418 				   &wm->sagv.wm0);
2419 		skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
2420 				   &wm->sagv.trans_wm);
2421 	}
2422 
2423 	skl_ddb_entry_write(i915,
2424 			    PLANE_BUF_CFG(pipe, plane_id), ddb);
2425 
2426 	if (DISPLAY_VER(i915) < 11)
2427 		skl_ddb_entry_write(i915,
2428 				    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
2429 }
2430 
2431 void skl_write_cursor_wm(struct intel_plane *plane,
2432 			 const struct intel_crtc_state *crtc_state)
2433 {
2434 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2435 	enum plane_id plane_id = plane->id;
2436 	enum pipe pipe = plane->pipe;
2437 	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
2438 	const struct skl_ddb_entry *ddb =
2439 		&crtc_state->wm.skl.plane_ddb[plane_id];
2440 	int level;
2441 
2442 	for (level = 0; level < i915->display.wm.num_levels; level++)
2443 		skl_write_wm_level(i915, CUR_WM(pipe, level),
2444 				   skl_plane_wm_level(pipe_wm, plane_id, level));
2445 
2446 	skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
2447 			   skl_plane_trans_wm(pipe_wm, plane_id));
2448 
2449 	if (HAS_HW_SAGV_WM(i915)) {
2450 		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
2451 
2452 		skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
2453 				   &wm->sagv.wm0);
2454 		skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
2455 				   &wm->sagv.trans_wm);
2456 	}
2457 
2458 	skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
2459 }
2460 
2461 static bool skl_wm_level_equals(const struct skl_wm_level *l1,
2462 				const struct skl_wm_level *l2)
2463 {
2464 	return l1->enable == l2->enable &&
2465 		l1->ignore_lines == l2->ignore_lines &&
2466 		l1->lines == l2->lines &&
2467 		l1->blocks == l2->blocks;
2468 }
2469 
2470 static bool skl_plane_wm_equals(struct drm_i915_private *i915,
2471 				const struct skl_plane_wm *wm1,
2472 				const struct skl_plane_wm *wm2)
2473 {
2474 	int level;
2475 
2476 	for (level = 0; level < i915->display.wm.num_levels; level++) {
2477 		/*
2478 		 * We don't check uv_wm as the hardware doesn't actually
2479 		 * use it. It only gets used for calculating the required
2480 		 * ddb allocation.
2481 		 */
2482 		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
2483 			return false;
2484 	}
2485 
2486 	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
2487 		skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
2488 		skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
2489 }
2490 
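/*
 * DDB entries are half-open [start, end) ranges, so two entries overlap
 * iff each one starts before the other one ends.
 */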
2491 static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
2492 				    const struct skl_ddb_entry *b)
2493 {
2494 	return a->start < b->end && b->start < a->end;
2495 }
2496 
2497 static void skl_ddb_entry_union(struct skl_ddb_entry *a,
2498 				const struct skl_ddb_entry *b)
2499 {
2500 	if (a->end && b->end) {
2501 		a->start = min(a->start, b->start);
2502 		a->end = max(a->end, b->end);
2503 	} else if (b->end) {
2504 		a->start = b->start;
2505 		a->end = b->end;
2506 	}
2507 }
2508 
2509 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
2510 				 const struct skl_ddb_entry *entries,
2511 				 int num_entries, int ignore_idx)
2512 {
2513 	int i;
2514 
2515 	for (i = 0; i < num_entries; i++) {
2516 		if (i != ignore_idx &&
2517 		    skl_ddb_entries_overlap(ddb, &entries[i]))
2518 			return true;
2519 	}
2520 
2521 	return false;
2522 }
2523 
2524 static int
2525 skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
2526 			    struct intel_crtc_state *new_crtc_state)
2527 {
2528 	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
2529 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2530 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2531 	struct intel_plane *plane;
2532 
2533 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2534 		struct intel_plane_state *plane_state;
2535 		enum plane_id plane_id = plane->id;
2536 
2537 		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
2538 					&new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
2539 		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
2540 					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
2541 			continue;
2542 
2543 		plane_state = intel_atomic_get_plane_state(state, plane);
2544 		if (IS_ERR(plane_state))
2545 			return PTR_ERR(plane_state);
2546 
2547 		new_crtc_state->update_planes |= BIT(plane_id);
2548 		new_crtc_state->async_flip_planes = 0;
2549 		new_crtc_state->do_async_flip = false;
2550 	}
2551 
2552 	return 0;
2553 }
2554 
2555 static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
2556 {
2557 	struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
2558 	u8 enabled_slices;
2559 	enum pipe pipe;
2560 
2561 	/*
2562 	 * FIXME: For now we always enable slice S1 as per
2563 	 * the Bspec display initialization sequence.
2564 	 */
2565 	enabled_slices = BIT(DBUF_S1);
2566 
2567 	for_each_pipe(i915, pipe)
2568 		enabled_slices |= dbuf_state->slices[pipe];
2569 
2570 	return enabled_slices;
2571 }
2572 
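/*
 * Recompute the global dbuf state for this commit: the active pipes, MBUS
 * joining, each pipe's dbuf slices and DDB weight, and finally the
 * per-crtc and per-plane DDB allocations. The global dbuf state is locked
 * (or serialized) whenever any of these change.
 */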
2573 static int
2574 skl_compute_ddb(struct intel_atomic_state *state)
2575 {
2576 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2577 	const struct intel_dbuf_state *old_dbuf_state;
2578 	struct intel_dbuf_state *new_dbuf_state = NULL;
2579 	const struct intel_crtc_state *old_crtc_state;
2580 	struct intel_crtc_state *new_crtc_state;
2581 	struct intel_crtc *crtc;
2582 	int ret, i;
2583 
2584 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2585 		new_dbuf_state = intel_atomic_get_dbuf_state(state);
2586 		if (IS_ERR(new_dbuf_state))
2587 			return PTR_ERR(new_dbuf_state);
2588 
2589 		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
2590 		break;
2591 	}
2592 
2593 	if (!new_dbuf_state)
2594 		return 0;
2595 
2596 	new_dbuf_state->active_pipes =
2597 		intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
2598 
2599 	if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
2600 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2601 		if (ret)
2602 			return ret;
2603 	}
2604 
2605 	if (HAS_MBUS_JOINING(i915)) {
2606 		new_dbuf_state->joined_mbus =
2607 			adlp_check_mbus_joined(new_dbuf_state->active_pipes);
2608 
2609 		if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2610 			ret = intel_cdclk_state_set_joined_mbus(state, new_dbuf_state->joined_mbus);
2611 			if (ret)
2612 				return ret;
2613 		}
2614 	}
2615 
2616 	for_each_intel_crtc(&i915->drm, crtc) {
2617 		enum pipe pipe = crtc->pipe;
2618 
2619 		new_dbuf_state->slices[pipe] =
2620 			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
2621 						new_dbuf_state->joined_mbus);
2622 
2623 		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
2624 			continue;
2625 
2626 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2627 		if (ret)
2628 			return ret;
2629 	}
2630 
2631 	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
2632 
2633 	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
2634 	    old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2635 		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
2636 		if (ret)
2637 			return ret;
2638 
2639 		drm_dbg_kms(&i915->drm,
2640 			    "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
2641 			    old_dbuf_state->enabled_slices,
2642 			    new_dbuf_state->enabled_slices,
2643 			    DISPLAY_INFO(i915)->dbuf.slice_mask,
2644 			    str_yes_no(old_dbuf_state->joined_mbus),
2645 			    str_yes_no(new_dbuf_state->joined_mbus));
2646 	}
2647 
2648 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2649 		enum pipe pipe = crtc->pipe;
2650 
2651 		new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
2652 
2653 		if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
2654 			continue;
2655 
2656 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2657 		if (ret)
2658 			return ret;
2659 	}
2660 
2661 	for_each_intel_crtc(&i915->drm, crtc) {
2662 		ret = skl_crtc_allocate_ddb(state, crtc);
2663 		if (ret)
2664 			return ret;
2665 	}
2666 
2667 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2668 					    new_crtc_state, i) {
2669 		ret = skl_crtc_allocate_plane_ddb(state, crtc);
2670 		if (ret)
2671 			return ret;
2672 
2673 		ret = skl_ddb_add_affected_planes(old_crtc_state,
2674 						  new_crtc_state);
2675 		if (ret)
2676 			return ret;
2677 	}
2678 
2679 	return 0;
2680 }
2681 
2682 static char enast(bool enable)
2683 {
2684 	return enable ? '*' : ' ';
2685 }
2686 
2687 static void
2688 skl_print_wm_changes(struct intel_atomic_state *state)
2689 {
2690 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2691 	const struct intel_crtc_state *old_crtc_state;
2692 	const struct intel_crtc_state *new_crtc_state;
2693 	struct intel_plane *plane;
2694 	struct intel_crtc *crtc;
2695 	int i;
2696 
2697 	if (!drm_debug_enabled(DRM_UT_KMS))
2698 		return;
2699 
2700 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2701 					    new_crtc_state, i) {
2702 		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
2703 
2704 		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
2705 		new_pipe_wm = &new_crtc_state->wm.skl.optimal;
2706 
2707 		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2708 			enum plane_id plane_id = plane->id;
2709 			const struct skl_ddb_entry *old, *new;
2710 
2711 			old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
2712 			new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
2713 
2714 			if (skl_ddb_entry_equal(old, new))
2715 				continue;
2716 
2717 			drm_dbg_kms(&i915->drm,
2718 				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
2719 				    plane->base.base.id, plane->base.name,
2720 				    old->start, old->end, new->start, new->end,
2721 				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
2722 		}
2723 
2724 		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2725 			enum plane_id plane_id = plane->id;
2726 			const struct skl_plane_wm *old_wm, *new_wm;
2727 
2728 			old_wm = &old_pipe_wm->planes[plane_id];
2729 			new_wm = &new_pipe_wm->planes[plane_id];
2730 
2731 			if (skl_plane_wm_equals(i915, old_wm, new_wm))
2732 				continue;
2733 
2734 			drm_dbg_kms(&i915->drm,
2735 				    "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
2736 				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
2737 				    plane->base.base.id, plane->base.name,
2738 				    enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
2739 				    enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
2740 				    enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
2741 				    enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
2742 				    enast(old_wm->trans_wm.enable),
2743 				    enast(old_wm->sagv.wm0.enable),
2744 				    enast(old_wm->sagv.trans_wm.enable),
2745 				    enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
2746 				    enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
2747 				    enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
2748 				    enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
2749 				    enast(new_wm->trans_wm.enable),
2750 				    enast(new_wm->sagv.wm0.enable),
2751 				    enast(new_wm->sagv.trans_wm.enable));
2752 
2753 			drm_dbg_kms(&i915->drm,
2754 				    "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
2755 				      " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
2756 				    plane->base.base.id, plane->base.name,
2757 				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
2758 				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
2759 				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
2760 				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
2761 				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
2762 				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
2763 				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
2764 				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
2765 				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
2766 				    enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
2767 				    enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
2768 				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
2769 				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
2770 				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
2771 				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
2772 				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
2773 				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
2774 				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
2775 				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
2776 				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
2777 				    enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
2778 				    enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
2779 
2780 			drm_dbg_kms(&i915->drm,
2781 				    "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2782 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2783 				    plane->base.base.id, plane->base.name,
2784 				    old_wm->wm[0].blocks, old_wm->wm[1].blocks,
2785 				    old_wm->wm[2].blocks, old_wm->wm[3].blocks,
2786 				    old_wm->wm[4].blocks, old_wm->wm[5].blocks,
2787 				    old_wm->wm[6].blocks, old_wm->wm[7].blocks,
2788 				    old_wm->trans_wm.blocks,
2789 				    old_wm->sagv.wm0.blocks,
2790 				    old_wm->sagv.trans_wm.blocks,
2791 				    new_wm->wm[0].blocks, new_wm->wm[1].blocks,
2792 				    new_wm->wm[2].blocks, new_wm->wm[3].blocks,
2793 				    new_wm->wm[4].blocks, new_wm->wm[5].blocks,
2794 				    new_wm->wm[6].blocks, new_wm->wm[7].blocks,
2795 				    new_wm->trans_wm.blocks,
2796 				    new_wm->sagv.wm0.blocks,
2797 				    new_wm->sagv.trans_wm.blocks);
2798 
2799 			drm_dbg_kms(&i915->drm,
2800 				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2801 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2802 				    plane->base.base.id, plane->base.name,
2803 				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
2804 				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
2805 				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
2806 				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
2807 				    old_wm->trans_wm.min_ddb_alloc,
2808 				    old_wm->sagv.wm0.min_ddb_alloc,
2809 				    old_wm->sagv.trans_wm.min_ddb_alloc,
2810 				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
2811 				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
2812 				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
2813 				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
2814 				    new_wm->trans_wm.min_ddb_alloc,
2815 				    new_wm->sagv.wm0.min_ddb_alloc,
2816 				    new_wm->sagv.trans_wm.min_ddb_alloc);
2817 		}
2818 	}
2819 }
2820 
2821 static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
2822 					 const struct skl_pipe_wm *old_pipe_wm,
2823 					 const struct skl_pipe_wm *new_pipe_wm)
2824 {
2825 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2826 	int level;
2827 
2828 	for (level = 0; level < i915->display.wm.num_levels; level++) {
2829 		/*
2830 		 * We don't check uv_wm as the hardware doesn't actually
2831 		 * use it. It only gets used for calculating the required
2832 		 * ddb allocation.
2833 		 */
2834 		if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
2835 					 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
2836 			return false;
2837 	}
2838 
2839 	if (HAS_HW_SAGV_WM(i915)) {
2840 		const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
2841 		const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
2842 
2843 		if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
2844 		    !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
2845 			return false;
2846 	}
2847 
2848 	return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
2849 				   skl_plane_trans_wm(new_pipe_wm, plane->id));
2850 }
2851 
2852 /*
2853  * To make sure the cursor watermark registers are always consistent
2854  * with our computed state the following scenario needs special
2855  * treatment:
2856  *
2857  * 1. enable cursor
2858  * 2. move cursor entirely offscreen
2859  * 3. disable cursor
2860  *
2861  * Step 2. does call .disable_plane() but does not zero the watermarks
2862  * (since we consider an offscreen cursor still active for the purposes
2863  * of watermarks). Step 3. would not normally call .disable_plane()
2864  * because the actual plane visibility isn't changing, and we don't
2865  * deallocate the cursor ddb until the pipe gets disabled. So we must
2866  * force step 3. to call .disable_plane() to update the watermark
2867  * registers properly.
2868  *
2869  * Other planes do not suffer from this issue as their watermarks are
2870  * calculated based on the actual plane visibility. The only time this
2871  * can trigger for the other planes is during the initial readout as the
2872  * default value of the watermarks registers is not zero.
2873  */
2874 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
2875 				      struct intel_crtc *crtc)
2876 {
2877 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2878 	const struct intel_crtc_state *old_crtc_state =
2879 		intel_atomic_get_old_crtc_state(state, crtc);
2880 	struct intel_crtc_state *new_crtc_state =
2881 		intel_atomic_get_new_crtc_state(state, crtc);
2882 	struct intel_plane *plane;
2883 
2884 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2885 		struct intel_plane_state *plane_state;
2886 		enum plane_id plane_id = plane->id;
2887 
2888 		/*
2889 		 * Force a full wm update for every plane on modeset.
2890 		 * Required because the reset value of the wm registers
2891 		 * is non-zero, whereas we want all disabled planes to
2892 		 * have zero watermarks. So if we turn off the relevant
2893 		 * power well the hardware state will go out of sync
2894 		 * with the software state.
2895 		 */
2896 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
2897 		    skl_plane_selected_wm_equals(plane,
2898 						 &old_crtc_state->wm.skl.optimal,
2899 						 &new_crtc_state->wm.skl.optimal))
2900 			continue;
2901 
2902 		plane_state = intel_atomic_get_plane_state(state, plane);
2903 		if (IS_ERR(plane_state))
2904 			return PTR_ERR(plane_state);
2905 
2906 		new_crtc_state->update_planes |= BIT(plane_id);
2907 		new_crtc_state->async_flip_planes = 0;
2908 		new_crtc_state->do_async_flip = false;
2909 	}
2910 
2911 	return 0;
2912 }
2913 
2914 /*
2915  * If Fixed Refresh Rate:
2916  * Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from
2917  * watermark level1 and up and above. If watermark level 1 is
2918  * invalid program it with all 1's.
2919  * Program PKG_C_LATENCY Added Wake Time = DSB execution time
2920  * If Variable Refresh Rate:
2921  * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
2922  * Program PKG_C_LATENCY Added Wake Time = 0
2923  */
2924 static void
2925 skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled)
2926 {
2927 	u32 max_latency = 0;
2928 	u32 clear = 0, val = 0;
2929 	u32 added_wake_time = 0;
2930 
2931 	if (DISPLAY_VER(i915) < 20)
2932 		return;
2933 
2934 	if (vrr_enabled) {
2935 		max_latency = LNL_PKG_C_LATENCY_MASK;
2936 		added_wake_time = 0;
2937 	} else {
2938 		max_latency = skl_watermark_max_latency(i915, 1);
2939 		if (max_latency == 0)
2940 			max_latency = LNL_PKG_C_LATENCY_MASK;
2941 		added_wake_time = DSB_EXE_TIME +
2942 			i915->display.sagv.block_time_us;
2943 	}
2944 
2945 	clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
2946 	val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency);
2947 	val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
2948 
2949 	intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val);
2950 }
2951 
2952 static int
2953 skl_compute_wm(struct intel_atomic_state *state)
2954 {
2955 	struct intel_crtc *crtc;
2956 	struct intel_crtc_state __maybe_unused *new_crtc_state;
2957 	int ret, i;
2958 	bool vrr_enabled = false;
2959 
2960 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2961 		ret = skl_build_pipe_wm(state, crtc);
2962 		if (ret)
2963 			return ret;
2964 	}
2965 
2966 	ret = skl_compute_ddb(state);
2967 	if (ret)
2968 		return ret;
2969 
2970 	ret = intel_compute_sagv_mask(state);
2971 	if (ret)
2972 		return ret;
2973 
2974 	/*
2975 	 * skl_compute_ddb() will have adjusted the final watermarks
2976 	 * based on how much ddb is available. Now we can actually
2977 	 * check if the final watermarks changed.
2978 	 */
2979 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2980 		ret = skl_wm_add_affected_planes(state, crtc);
2981 		if (ret)
2982 			return ret;
2983 
2984 		if (new_crtc_state->vrr.enable)
2985 			vrr_enabled = true;
2986 	}
2987 
2988 	skl_program_dpkgc_latency(to_i915(state->base.dev), vrr_enabled);
2989 
2990 	skl_print_wm_changes(state);
2991 
2992 	return 0;
2993 }
2994 
2995 static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
2996 {
2997 	level->enable = val & PLANE_WM_EN;
2998 	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
2999 	level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
3000 	level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
3001 }
3002 
3003 static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
3004 				     struct skl_pipe_wm *out)
3005 {
3006 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3007 	enum pipe pipe = crtc->pipe;
3008 	enum plane_id plane_id;
3009 	int level;
3010 	u32 val;
3011 
3012 	for_each_plane_id_on_crtc(crtc, plane_id) {
3013 		struct skl_plane_wm *wm = &out->planes[plane_id];
3014 
3015 		for (level = 0; level < i915->display.wm.num_levels; level++) {
3016 			if (plane_id != PLANE_CURSOR)
3017 				val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
3018 			else
3019 				val = intel_de_read(i915, CUR_WM(pipe, level));
3020 
3021 			skl_wm_level_from_reg_val(val, &wm->wm[level]);
3022 		}
3023 
3024 		if (plane_id != PLANE_CURSOR)
3025 			val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
3026 		else
3027 			val = intel_de_read(i915, CUR_WM_TRANS(pipe));
3028 
3029 		skl_wm_level_from_reg_val(val, &wm->trans_wm);
3030 
3031 		if (HAS_HW_SAGV_WM(i915)) {
3032 			if (plane_id != PLANE_CURSOR)
3033 				val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
3034 			else
3035 				val = intel_de_read(i915, CUR_WM_SAGV(pipe));
3036 
3037 			skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
3038 
3039 			if (plane_id != PLANE_CURSOR)
3040 				val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
3041 			else
3042 				val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
3043 
3044 			skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
3045 		} else if (DISPLAY_VER(i915) >= 12) {
3046 			wm->sagv.wm0 = wm->wm[0];
3047 			wm->sagv.trans_wm = wm->trans_wm;
3048 		}
3049 	}
3050 }
3051 
3052 static void skl_wm_get_hw_state(struct drm_i915_private *i915)
3053 {
3054 	struct intel_dbuf_state *dbuf_state =
3055 		to_intel_dbuf_state(i915->display.dbuf.obj.state);
3056 	struct intel_crtc *crtc;
3057 
3058 	if (HAS_MBUS_JOINING(i915))
3059 		dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
3060 
3061 	dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw);
3062 
3063 	for_each_intel_crtc(&i915->drm, crtc) {
3064 		struct intel_crtc_state *crtc_state =
3065 			to_intel_crtc_state(crtc->base.state);
3066 		enum pipe pipe = crtc->pipe;
3067 		unsigned int mbus_offset;
3068 		enum plane_id plane_id;
3069 		u8 slices;
3070 
3071 		memset(&crtc_state->wm.skl.optimal, 0,
3072 		       sizeof(crtc_state->wm.skl.optimal));
3073 		if (crtc_state->hw.active)
3074 			skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
3075 		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
3076 
3077 		memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
3078 
3079 		for_each_plane_id_on_crtc(crtc, plane_id) {
3080 			struct skl_ddb_entry *ddb =
3081 				&crtc_state->wm.skl.plane_ddb[plane_id];
3082 			struct skl_ddb_entry *ddb_y =
3083 				&crtc_state->wm.skl.plane_ddb_y[plane_id];
3084 
3085 			if (!crtc_state->hw.active)
3086 				continue;
3087 
3088 			skl_ddb_get_hw_plane_state(i915, crtc->pipe,
3089 						   plane_id, ddb, ddb_y);
3090 
3091 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
3092 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
3093 		}
3094 
3095 		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
3096 
3097 		/*
3098 		 * Used for checking overlaps, so we need absolute
3099 		 * offsets instead of MBUS relative offsets.
3100 		 */
3101 		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
3102 						 dbuf_state->joined_mbus);
3103 		mbus_offset = mbus_ddb_offset(i915, slices);
3104 		crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
3105 		crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
3106 
3107 		/* The slices actually used by the planes on the pipe */
3108 		dbuf_state->slices[pipe] =
3109 			skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
3110 
3111 		drm_dbg_kms(&i915->drm,
3112 			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
3113 			    crtc->base.base.id, crtc->base.name,
3114 			    dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
3115 			    dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
3116 			    str_yes_no(dbuf_state->joined_mbus));
3117 	}
3118 
3119 	dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
3120 }
3121 
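/*
 * Check whether the DBUF configuration inherited from the BIOS is one we
 * cannot transition away from safely: either a pipe is using dbuf slices
 * it shouldn't be using, or the plane DDB allocations of different pipes
 * overlap.
 */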
3122 static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
3123 {
3124 	const struct intel_dbuf_state *dbuf_state =
3125 		to_intel_dbuf_state(i915->display.dbuf.obj.state);
3126 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
3127 	struct intel_crtc *crtc;
3128 
3129 	for_each_intel_crtc(&i915->drm, crtc) {
3130 		const struct intel_crtc_state *crtc_state =
3131 			to_intel_crtc_state(crtc->base.state);
3132 
3133 		entries[crtc->pipe] = crtc_state->wm.skl.ddb;
3134 	}
3135 
3136 	for_each_intel_crtc(&i915->drm, crtc) {
3137 		const struct intel_crtc_state *crtc_state =
3138 			to_intel_crtc_state(crtc->base.state);
3139 		u8 slices;
3140 
3141 		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
3142 						 dbuf_state->joined_mbus);
3143 		if (dbuf_state->slices[crtc->pipe] & ~slices)
3144 			return true;
3145 
3146 		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
3147 						I915_MAX_PIPES, crtc->pipe))
3148 			return true;
3149 	}
3150 
3151 	return false;
3152 }
3153 
3154 static void skl_wm_sanitize(struct drm_i915_private *i915)
3155 {
3156 	struct intel_crtc *crtc;
3157 
3158 	/*
3159 	 * On TGL/RKL (at least) the BIOS likes to assign the planes
3160 	 * to the wrong DBUF slices. This will cause an infinite loop
3161 	 * in skl_commit_modeset_enables() as it can't find a way to
3162 	 * transition between the old bogus DBUF layout to the new
3163 	 * proper DBUF layout without DBUF allocation overlaps between
3164 	 * the planes (which cannot be allowed or else the hardware
3165 	 * may hang). If we detect a bogus DBUF layout just turn off
3166 	 * all the planes so that skl_commit_modeset_enables() can
3167 	 * simply ignore them.
3168 	 */
3169 	if (!skl_dbuf_is_misconfigured(i915))
3170 		return;
3171 
3172 	drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
3173 
3174 	for_each_intel_crtc(&i915->drm, crtc) {
3175 		struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3176 		const struct intel_plane_state *plane_state =
3177 			to_intel_plane_state(plane->base.state);
3178 		struct intel_crtc_state *crtc_state =
3179 			to_intel_crtc_state(crtc->base.state);
3180 
3181 		if (plane_state->uapi.visible)
3182 			intel_plane_disable_noatomic(crtc, plane);
3183 
3184 		drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
3185 
3186 		memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
3187 	}
3188 }
3189 
3190 static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
3191 {
3192 	skl_wm_get_hw_state(i915);
3193 	skl_wm_sanitize(i915);
3194 }
3195 
3196 void intel_wm_state_verify(struct intel_atomic_state *state,
3197 			   struct intel_crtc *crtc)
3198 {
3199 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3200 	const struct intel_crtc_state *new_crtc_state =
3201 		intel_atomic_get_new_crtc_state(state, crtc);
3202 	struct skl_hw_state {
3203 		struct skl_ddb_entry ddb[I915_MAX_PLANES];
3204 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
3205 		struct skl_pipe_wm wm;
3206 	} *hw;
3207 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
3208 	struct intel_plane *plane;
3209 	u8 hw_enabled_slices;
3210 	int level;
3211 
3212 	if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
3213 		return;
3214 
3215 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3216 	if (!hw)
3217 		return;
3218 
3219 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
3220 
3221 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
3222 
3223 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
3224 
3225 	if (DISPLAY_VER(i915) >= 11 &&
3226 	    hw_enabled_slices != i915->display.dbuf.enabled_slices)
3227 		drm_err(&i915->drm,
3228 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
3229 			i915->display.dbuf.enabled_slices,
3230 			hw_enabled_slices);
3231 
3232 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
3233 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
3234 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
3235 
3236 		/* Watermarks */
3237 		for (level = 0; level < i915->display.wm.num_levels; level++) {
3238 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
3239 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
3240 
3241 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
3242 				continue;
3243 
3244 			drm_err(&i915->drm,
3245 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3246 				plane->base.base.id, plane->base.name, level,
3247 				sw_wm_level->enable,
3248 				sw_wm_level->blocks,
3249 				sw_wm_level->lines,
3250 				hw_wm_level->enable,
3251 				hw_wm_level->blocks,
3252 				hw_wm_level->lines);
3253 		}
3254 
3255 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
3256 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
3257 
3258 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3259 			drm_err(&i915->drm,
3260 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3261 				plane->base.base.id, plane->base.name,
3262 				sw_wm_level->enable,
3263 				sw_wm_level->blocks,
3264 				sw_wm_level->lines,
3265 				hw_wm_level->enable,
3266 				hw_wm_level->blocks,
3267 				hw_wm_level->lines);
3268 		}
3269 
3270 		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
3271 		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
3272 
3273 		if (HAS_HW_SAGV_WM(i915) &&
3274 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3275 			drm_err(&i915->drm,
3276 				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3277 				plane->base.base.id, plane->base.name,
3278 				sw_wm_level->enable,
3279 				sw_wm_level->blocks,
3280 				sw_wm_level->lines,
3281 				hw_wm_level->enable,
3282 				hw_wm_level->blocks,
3283 				hw_wm_level->lines);
3284 		}
3285 
3286 		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
3287 		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
3288 
3289 		if (HAS_HW_SAGV_WM(i915) &&
3290 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3291 			drm_err(&i915->drm,
3292 				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3293 				plane->base.base.id, plane->base.name,
3294 				sw_wm_level->enable,
3295 				sw_wm_level->blocks,
3296 				sw_wm_level->lines,
3297 				hw_wm_level->enable,
3298 				hw_wm_level->blocks,
3299 				hw_wm_level->lines);
3300 		}
3301 
3302 		/* DDB */
3303 		hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
3304 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
3305 
3306 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
3307 			drm_err(&i915->drm,
3308 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
3309 				plane->base.base.id, plane->base.name,
3310 				sw_ddb_entry->start, sw_ddb_entry->end,
3311 				hw_ddb_entry->start, hw_ddb_entry->end);
3312 		}
3313 	}
3314 
3315 	kfree(hw);
3316 }
3317 
3318 bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
3319 {
3320 	return i915->display.wm.ipc_enabled;
3321 }
3322 
3323 void skl_watermark_ipc_update(struct drm_i915_private *i915)
3324 {
3325 	if (!HAS_IPC(i915))
3326 		return;
3327 
3328 	intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
3329 		     skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
3330 }
3331 
3332 static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
3333 {
3334 	/* Display WA #0477 WaDisableIPC: skl */
3335 	if (IS_SKYLAKE(i915))
3336 		return false;
3337 
3338 	/* Display WA #1141: SKL:all KBL:all CFL */
3339 	if (IS_KABYLAKE(i915) ||
3340 	    IS_COFFEELAKE(i915) ||
3341 	    IS_COMETLAKE(i915))
3342 		return i915->dram_info.symmetric_memory;
3343 
3344 	return true;
3345 }
3346 
3347 void skl_watermark_ipc_init(struct drm_i915_private *i915)
3348 {
3349 	if (!HAS_IPC(i915))
3350 		return;
3351 
3352 	i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
3353 
3354 	skl_watermark_ipc_update(i915);
3355 }
3356 
3357 static void
3358 adjust_wm_latency(struct drm_i915_private *i915,
3359 		  u16 wm[], int num_levels, int read_latency)
3360 {
3361 	bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
3362 	int i, level;
3363 
3364 	/*
3365 	 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
3366 	 * need to be disabled. We make sure to sanitize the values out
3367 	 * of the punit to satisfy this requirement.
3368 	 */
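	/* e.g. {2, 4, 0, 8} from the punit becomes {2, 4, 0, 0} with num_levels dropping to 2 */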
3369 	for (level = 1; level < num_levels; level++) {
3370 		if (wm[level] == 0) {
3371 			for (i = level + 1; i < num_levels; i++)
3372 				wm[i] = 0;
3373 
3374 			num_levels = level;
3375 			break;
3376 		}
3377 	}
3378 
3379 	/*
3380 	 * WaWmMemoryReadLatency
3381 	 *
3382 	 * The punit doesn't take the read latency into account, so we need
3383 	 * to add a proper adjustment to each valid level we retrieve
3384 	 * from the punit when the level 0 response data is 0us.
3385 	 */
3386 	if (wm[0] == 0) {
3387 		for (level = 0; level < num_levels; level++)
3388 			wm[level] += read_latency;
3389 	}
3390 
3391 	/*
3392 	 * WA Level-0 adjustment for 16GB DIMMs: SKL+
3393 	 * If we could not get DIMM info, assume 16GB DIMMs and enable
3394 	 * this WA to prevent any underruns: bump the level 0 latency
3395 	 * by 1us to stay on the safe side.
3396 	 */
3397 	if (wm_lv_0_adjust_needed)
3398 		wm[0] += 1;
3399 }
3400 
3401 static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3402 {
3403 	int num_levels = i915->display.wm.num_levels;
3404 	u32 val;
3405 
3406 	val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
3407 	wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3408 	wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3409 
3410 	val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
3411 	wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3412 	wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3413 
3414 	val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
3415 	wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3416 	wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3417 
3418 	adjust_wm_latency(i915, wm, num_levels, 6);
3419 }
3420 
3421 static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3422 {
3423 	int num_levels = i915->display.wm.num_levels;
3424 	int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
3425 	int mult = IS_DG2(i915) ? 2 : 1;
3426 	u32 val;
3427 	int ret;
3428 
3429 	/* read the first set of memory latencies[0:3] */
3430 	val = 0; /* data0 to be programmed to 0 for first set */
3431 	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3432 	if (ret) {
3433 		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
3434 		return;
3435 	}
3436 
3437 	wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3438 	wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3439 	wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3440 	wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3441 
3442 	/* read the second set of memory latencies[4:7] */
3443 	val = 1; /* data0 to be programmed to 1 for second set */
3444 	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3445 	if (ret) {
3446 		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
3447 		return;
3448 	}
3449 
3450 	wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3451 	wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3452 	wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3453 	wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3454 
3455 	adjust_wm_latency(i915, wm, num_levels, read_latency);
3456 }
3457 
3458 static void skl_setup_wm_latency(struct drm_i915_private *i915)
3459 {
3460 	if (HAS_HW_SAGV_WM(i915))
3461 		i915->display.wm.num_levels = 6;
3462 	else
3463 		i915->display.wm.num_levels = 8;
3464 
3465 	if (DISPLAY_VER(i915) >= 14)
3466 		mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
3467 	else
3468 		skl_read_wm_latency(i915, i915->display.wm.skl_latency);
3469 
3470 	intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
3471 }
3472 
3473 static const struct intel_wm_funcs skl_wm_funcs = {
3474 	.compute_global_watermarks = skl_compute_wm,
3475 	.get_hw_state = skl_wm_get_hw_state_and_sanitize,
3476 };
3477 
3478 void skl_wm_init(struct drm_i915_private *i915)
3479 {
3480 	intel_sagv_init(i915);
3481 
3482 	skl_setup_wm_latency(i915);
3483 
3484 	i915->display.funcs.wm = &skl_wm_funcs;
3485 }
3486 
3487 static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
3488 {
3489 	struct intel_dbuf_state *dbuf_state;
3490 
3491 	dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
3492 	if (!dbuf_state)
3493 		return NULL;
3494 
3495 	return &dbuf_state->base;
3496 }
3497 
3498 static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
3499 				     struct intel_global_state *state)
3500 {
3501 	kfree(state);
3502 }
3503 
3504 static const struct intel_global_state_funcs intel_dbuf_funcs = {
3505 	.atomic_duplicate_state = intel_dbuf_duplicate_state,
3506 	.atomic_destroy_state = intel_dbuf_destroy_state,
3507 };
3508 
3509 struct intel_dbuf_state *
3510 intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
3511 {
3512 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3513 	struct intel_global_state *dbuf_state;
3514 
3515 	dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
3516 	if (IS_ERR(dbuf_state))
3517 		return ERR_CAST(dbuf_state);
3518 
3519 	return to_intel_dbuf_state(dbuf_state);
3520 }
3521 
3522 int intel_dbuf_init(struct drm_i915_private *i915)
3523 {
3524 	struct intel_dbuf_state *dbuf_state;
3525 
3526 	dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
3527 	if (!dbuf_state)
3528 		return -ENOMEM;
3529 
3530 	intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
3531 				     &dbuf_state->base, &intel_dbuf_funcs);
3532 
3533 	return 0;
3534 }
3535 
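/*
 * On xelpdp the DBUF banks are shared by pipes A/D and B/C respectively.
 * Return true if @pipe is the only active pipe on its DBUF bank.
 */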
3536 static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
3537 {
3538 	switch (pipe) {
3539 	case PIPE_A:
3540 		return !(active_pipes & BIT(PIPE_D));
3541 	case PIPE_D:
3542 		return !(active_pipes & BIT(PIPE_A));
3543 	case PIPE_B:
3544 		return !(active_pipes & BIT(PIPE_C));
3545 	case PIPE_C:
3546 		return !(active_pipes & BIT(PIPE_B));
3547 	default: /* to suppress compiler warning */
3548 		MISSING_CASE(pipe);
3549 		break;
3550 	}
3551 
3552 	return false;
3553 }
3554 
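/*
 * Program the per-pipe MBUS DBOX credits. The credit values depend on
 * the platform, on whether mbus is joined, and (on display ver 14+) on
 * whether the pipe is the only active pipe on its DBUF bank.
 */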
3555 static void intel_mbus_dbox_update(struct intel_atomic_state *state)
3556 {
3557 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3558 	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
3559 	const struct intel_crtc *crtc;
3560 	u32 val = 0;
3561 
3562 	if (DISPLAY_VER(i915) < 11)
3563 		return;
3564 
3565 	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
3566 	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
3567 	if (!new_dbuf_state ||
3568 	    (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
3569 	     new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
3570 		return;
3571 
3572 	if (DISPLAY_VER(i915) >= 14)
3573 		val |= MBUS_DBOX_I_CREDIT(2);
3574 
3575 	if (DISPLAY_VER(i915) >= 12) {
3576 		val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
3577 		val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
3578 		val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
3579 	}
3580 
3581 	if (DISPLAY_VER(i915) >= 14)
3582 		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
3583 						     MBUS_DBOX_A_CREDIT(8);
3584 	else if (IS_ALDERLAKE_P(i915))
3585 		/* Wa_22010947358:adl-p */
3586 		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
3587 						     MBUS_DBOX_A_CREDIT(4);
3588 	else
3589 		val |= MBUS_DBOX_A_CREDIT(2);
3590 
3591 	if (DISPLAY_VER(i915) >= 14) {
3592 		val |= MBUS_DBOX_B_CREDIT(0xA);
3593 	} else if (IS_ALDERLAKE_P(i915)) {
3594 		val |= MBUS_DBOX_BW_CREDIT(2);
3595 		val |= MBUS_DBOX_B_CREDIT(8);
3596 	} else if (DISPLAY_VER(i915) >= 12) {
3597 		val |= MBUS_DBOX_BW_CREDIT(2);
3598 		val |= MBUS_DBOX_B_CREDIT(12);
3599 	} else {
3600 		val |= MBUS_DBOX_BW_CREDIT(1);
3601 		val |= MBUS_DBOX_B_CREDIT(8);
3602 	}
3603 
3604 	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) {
3605 		u32 pipe_val = val;
3606 
3607 		if (DISPLAY_VER(i915) >= 14) {
3608 			if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
3609 							      new_dbuf_state->active_pipes))
3610 				pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
3611 			else
3612 				pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
3613 		}
3614 
3615 		intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
3616 	}
3617 }
3618 
3619 int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
3620 					   int ratio)
3621 {
3622 	struct intel_dbuf_state *dbuf_state;
3623 
3624 	dbuf_state = intel_atomic_get_dbuf_state(state);
3625 	if (IS_ERR(dbuf_state))
3626 		return PTR_ERR(dbuf_state);
3627 
3628 	dbuf_state->mdclk_cdclk_ratio = ratio;
3629 
3630 	return intel_atomic_lock_global_state(&dbuf_state->base);
3631 }
3632 
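/*
 * Program the DBUF minimum tracker state service time for the given
 * mdclk/cdclk ratio (doubled when mbus is joined), and on display ver
 * 20+ also the MBUS translation throttle. E.g. a ratio of 2 with mbus
 * joined programs DBUF_MIN_TRACKER_STATE_SERVICE to 2 * 2 - 1 = 3.
 */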
3633 void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
3634 					 int ratio, bool joined_mbus)
3635 {
3636 	enum dbuf_slice slice;
3637 
3638 	if (!HAS_MBUS_JOINING(i915))
3639 		return;
3640 
3641 	if (DISPLAY_VER(i915) >= 20)
3642 		intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
3643 			     MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
3644 
3645 	if (joined_mbus)
3646 		ratio *= 2;
3647 
3648 	drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
3649 		    ratio, str_yes_no(joined_mbus));
3650 
3651 	for_each_dbuf_slice(i915, slice)
3652 		intel_de_rmw(i915, DBUF_CTL_S(slice),
3653 			     DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
3654 			     DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
3655 }
3656 
3657 static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
3658 {
3659 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3660 	const struct intel_dbuf_state *old_dbuf_state =
3661 		intel_atomic_get_old_dbuf_state(state);
3662 	const struct intel_dbuf_state *new_dbuf_state =
3663 		intel_atomic_get_new_dbuf_state(state);
3664 	int mdclk_cdclk_ratio;
3665 
3666 	if (intel_cdclk_is_decreasing_later(state)) {
3667 		/* cdclk/mdclk will be changed later by intel_set_cdclk_post_plane_update() */
3668 		mdclk_cdclk_ratio = old_dbuf_state->mdclk_cdclk_ratio;
3669 	} else {
3670 		/* cdclk/mdclk already changed by intel_set_cdclk_pre_plane_update() */
3671 		mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
3672 	}
3673 
3674 	intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
3675 					    new_dbuf_state->joined_mbus);
3676 }
3677 
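/*
 * The given dbuf state has mbus joined, i.e. exactly one active pipe.
 * Return that pipe if it's not undergoing a full modeset (so
 * MBUS_JOIN_PIPE_SELECT can keep pointing at it), otherwise return
 * INVALID_PIPE.
 */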
3678 static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
3679 					const struct intel_dbuf_state *dbuf_state)
3680 {
3681 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3682 	enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
3683 	const struct intel_crtc_state *new_crtc_state;
3684 	struct intel_crtc *crtc;
3685 
3686 	drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
3687 	drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
3688 
3689 	crtc = intel_crtc_for_pipe(i915, pipe);
3690 	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
3691 
3692 	if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state))
3693 		return pipe;
3694 	else
3695 		return INVALID_PIPE;
3696 }
3697 
3698 static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
3699 					enum pipe pipe)
3700 {
3701 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3702 	const struct intel_dbuf_state *old_dbuf_state =
3703 		intel_atomic_get_old_dbuf_state(state);
3704 	const struct intel_dbuf_state *new_dbuf_state =
3705 		intel_atomic_get_new_dbuf_state(state);
3706 	u32 mbus_ctl;
3707 
3708 	drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
3709 		    str_yes_no(old_dbuf_state->joined_mbus),
3710 		    str_yes_no(new_dbuf_state->joined_mbus),
3711 		    pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
3712 
3713 	if (new_dbuf_state->joined_mbus)
3714 		mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
3715 	else
3716 		mbus_ctl = MBUS_HASHING_MODE_2x2;
3717 
3718 	if (pipe != INVALID_PIPE)
3719 		mbus_ctl |= MBUS_JOIN_PIPE_SELECT(pipe);
3720 	else
3721 		mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
3722 
3723 	intel_de_rmw(i915, MBUS_CTL,
3724 		     MBUS_HASHING_MODE_MASK | MBUS_JOIN |
3725 		     MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
3726 }
3727 
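/*
 * Handle the mbus join transition: when joining, the MBUS_CTL, DBOX
 * credit and min tracker state updates must happen before the DDB
 * entries are reallocated. The un-join transition is handled after the
 * DDB update in intel_dbuf_mbus_post_ddb_update().
 */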
3728 void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
3729 {
3730 	const struct intel_dbuf_state *new_dbuf_state =
3731 		intel_atomic_get_new_dbuf_state(state);
3732 	const struct intel_dbuf_state *old_dbuf_state =
3733 		intel_atomic_get_old_dbuf_state(state);
3734 
3735 	if (!new_dbuf_state)
3736 		return;
3737 
3738 	if (!old_dbuf_state->joined_mbus && new_dbuf_state->joined_mbus) {
3739 		enum pipe pipe = intel_mbus_joined_pipe(state, new_dbuf_state);
3740 
3741 		WARN_ON(!new_dbuf_state->base.changed);
3742 
3743 		intel_dbuf_mbus_join_update(state, pipe);
3744 		intel_mbus_dbox_update(state);
3745 		intel_dbuf_mdclk_min_tracker_update(state);
3746 	}
3747 }
3748 
3749 void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
3750 {
3751 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3752 	const struct intel_dbuf_state *new_dbuf_state =
3753 		intel_atomic_get_new_dbuf_state(state);
3754 	const struct intel_dbuf_state *old_dbuf_state =
3755 		intel_atomic_get_old_dbuf_state(state);
3756 
3757 	if (!new_dbuf_state)
3758 		return;
3759 
3760 	if (old_dbuf_state->joined_mbus && !new_dbuf_state->joined_mbus) {
3761 		enum pipe pipe = intel_mbus_joined_pipe(state, old_dbuf_state);
3762 
3763 		WARN_ON(!new_dbuf_state->base.changed);
3764 
3765 		intel_dbuf_mdclk_min_tracker_update(state);
3766 		intel_mbus_dbox_update(state);
3767 		intel_dbuf_mbus_join_update(state, pipe);
3768 
3769 		if (pipe != INVALID_PIPE) {
3770 			struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
3771 
3772 			intel_crtc_wait_for_next_vblank(crtc);
3773 		}
3774 	} else if (old_dbuf_state->joined_mbus == new_dbuf_state->joined_mbus &&
3775 		   old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
3776 		WARN_ON(!new_dbuf_state->base.changed);
3777 
3778 		intel_dbuf_mdclk_min_tracker_update(state);
3779 		intel_mbus_dbox_update(state);
3780 	}
3782 }
3783 
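/*
 * Enable the union of the old and new DBUF slices before the plane
 * update, so every slice needed at any point during the update is
 * powered up.
 */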
3784 void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
3785 {
3786 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3787 	const struct intel_dbuf_state *new_dbuf_state =
3788 		intel_atomic_get_new_dbuf_state(state);
3789 	const struct intel_dbuf_state *old_dbuf_state =
3790 		intel_atomic_get_old_dbuf_state(state);
3791 	u8 old_slices, new_slices;
3792 
3793 	if (!new_dbuf_state)
3794 		return;
3795 
3796 	old_slices = old_dbuf_state->enabled_slices;
3797 	new_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
3798 
3799 	if (old_slices == new_slices)
3800 		return;
3801 
3802 	WARN_ON(!new_dbuf_state->base.changed);
3803 
3804 	gen9_dbuf_slices_update(i915, new_slices);
3805 }
3806 
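/*
 * After the plane update, keep only the new state's DBUF slices
 * enabled, powering down the slices that are no longer needed.
 */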
3807 void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
3808 {
3809 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3810 	const struct intel_dbuf_state *new_dbuf_state =
3811 		intel_atomic_get_new_dbuf_state(state);
3812 	const struct intel_dbuf_state *old_dbuf_state =
3813 		intel_atomic_get_old_dbuf_state(state);
3814 	u8 old_slices, new_slices;
3815 
3816 	if (!new_dbuf_state)
3817 		return;
3818 
3819 	old_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
3820 	new_slices = new_dbuf_state->enabled_slices;
3821 
3822 	if (old_slices == new_slices)
3823 		return;
3824 
3825 	WARN_ON(!new_dbuf_state->base.changed);
3826 
3827 	gen9_dbuf_slices_update(i915, new_slices);
3828 }
3829 
3830 static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
3831 {
3832 	struct drm_i915_private *i915 = m->private;
3833 
3834 	seq_printf(m, "Isochronous Priority Control: %s\n",
3835 		   str_yes_no(skl_watermark_ipc_enabled(i915)));
3836 	return 0;
3837 }
3838 
3839 static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
3840 {
3841 	struct drm_i915_private *i915 = inode->i_private;
3842 
3843 	return single_open(file, skl_watermark_ipc_status_show, i915);
3844 }
3845 
3846 static ssize_t skl_watermark_ipc_status_write(struct file *file,
3847 					      const char __user *ubuf,
3848 					      size_t len, loff_t *offp)
3849 {
3850 	struct seq_file *m = file->private_data;
3851 	struct drm_i915_private *i915 = m->private;
3852 	intel_wakeref_t wakeref;
3853 	bool enable;
3854 	int ret;
3855 
3856 	ret = kstrtobool_from_user(ubuf, len, &enable);
3857 	if (ret < 0)
3858 		return ret;
3859 
3860 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
3861 		if (!skl_watermark_ipc_enabled(i915) && enable)
3862 			drm_info(&i915->drm,
3863 				 "Enabling IPC: WM will be proper only after next commit\n");
3864 		i915->display.wm.ipc_enabled = enable;
3865 		skl_watermark_ipc_update(i915);
3866 	}
3867 
3868 	return len;
3869 }
3870 
3871 static const struct file_operations skl_watermark_ipc_status_fops = {
3872 	.owner = THIS_MODULE,
3873 	.open = skl_watermark_ipc_status_open,
3874 	.read = seq_read,
3875 	.llseek = seq_lseek,
3876 	.release = single_release,
3877 	.write = skl_watermark_ipc_status_write
3878 };
3879 
3880 static int intel_sagv_status_show(struct seq_file *m, void *unused)
3881 {
3882 	struct drm_i915_private *i915 = m->private;
3883 	static const char * const sagv_status[] = {
3884 		[I915_SAGV_UNKNOWN] = "unknown",
3885 		[I915_SAGV_DISABLED] = "disabled",
3886 		[I915_SAGV_ENABLED] = "enabled",
3887 		[I915_SAGV_NOT_CONTROLLED] = "not controlled",
3888 	};
3889 
3890 	seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
3891 	seq_printf(m, "SAGV modparam: %s\n",
3892 		   str_enabled_disabled(i915->display.params.enable_sagv));
3893 	seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
3894 	seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
3895 
3896 	return 0;
3897 }
3898 
3899 DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
3900 
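/*
 * These files land under the DRM debugfs root, typically e.g.
 *   /sys/kernel/debug/dri/0/i915_ipc_status   (read/write)
 *   /sys/kernel/debug/dri/0/i915_sagv_status  (read only)
 */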
3901 void skl_watermark_debugfs_register(struct drm_i915_private *i915)
3902 {
3903 	struct drm_minor *minor = i915->drm.primary;
3904 
3905 	if (HAS_IPC(i915))
3906 		debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
3907 				    &skl_watermark_ipc_status_fops);
3908 
3909 	if (HAS_SAGV(i915))
3910 		debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
3911 				    &intel_sagv_status_fops);
3912 }
3913 
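/*
 * Walk from the last WM level down to @initial_wm_level and return the
 * first non-zero latency found, or 0 if all of those levels have a
 * zero latency.
 */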
3914 unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
3915 {
3916 	int level;
3917 
3918 	for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
3919 		unsigned int latency = skl_wm_latency(i915, level, NULL);
3920 
3921 		if (latency)
3922 			return latency;
3923 	}
3924 
3925 	return 0;
3926 }
3927