xref: /linux/drivers/gpu/drm/i915/display/skl_watermark.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <drm/drm_blend.h>
7 
8 #include "i915_drv.h"
9 #include "i915_reg.h"
10 #include "i9xx_wm.h"
11 #include "intel_atomic.h"
12 #include "intel_atomic_plane.h"
13 #include "intel_bw.h"
14 #include "intel_cdclk.h"
15 #include "intel_crtc.h"
16 #include "intel_cursor_regs.h"
17 #include "intel_de.h"
18 #include "intel_display.h"
19 #include "intel_display_power.h"
20 #include "intel_display_types.h"
21 #include "intel_fb.h"
22 #include "intel_fixed.h"
23 #include "intel_pcode.h"
24 #include "intel_wm.h"
25 #include "skl_universal_plane_regs.h"
26 #include "skl_watermark.h"
27 #include "skl_watermark_regs.h"
28 
29 /* It is expected that DSB can do posted writes to every register in
30  * the pipe and planes within 100us. For the flip queue use case, the
31  * recommended DSB execution time is 100us + one SAGV block time.
32  */
33 #define DSB_EXE_TIME 100
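
/*
 * Editorial example (illustrative, not part of the upstream file): on a
 * platform whose SAGV block time is 20us, the recommended flip queue DSB
 * execution budget described above would be DSB_EXE_TIME + 20 = 120us.
 */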
34 
35 static void skl_sagv_disable(struct drm_i915_private *i915);
36 
37 /* Stores plane specific WM parameters */
38 struct skl_wm_params {
39 	bool x_tiled, y_tiled;
40 	bool rc_surface;
41 	bool is_planar;
42 	u32 width;
43 	u8 cpp;
44 	u32 plane_pixel_rate;
45 	u32 y_min_scanlines;
46 	u32 plane_bytes_per_line;
47 	uint_fixed_16_16_t plane_blocks_per_line;
48 	uint_fixed_16_16_t y_tile_minimum;
49 	u32 linetime_us;
50 	u32 dbuf_block_size;
51 };
52 
53 u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
54 {
55 	u8 enabled_slices = 0;
56 	enum dbuf_slice slice;
57 
58 	for_each_dbuf_slice(i915, slice) {
59 		if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
60 			enabled_slices |= BIT(slice);
61 	}
62 
63 	return enabled_slices;
64 }
65 
66 /*
67  * FIXME: We still don't have the proper code to detect whether we need to
68  * apply the WA, so assume we always need it in order to avoid underruns.
69  */
70 static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
71 {
72 	return DISPLAY_VER(i915) == 9;
73 }
74 
75 bool
76 intel_has_sagv(struct drm_i915_private *i915)
77 {
78 	return HAS_SAGV(i915) &&
79 		i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
80 }
81 
82 static u32
83 intel_sagv_block_time(struct drm_i915_private *i915)
84 {
85 	if (DISPLAY_VER(i915) >= 14) {
86 		u32 val;
87 
88 		val = intel_de_read(i915, MTL_LATENCY_SAGV);
89 
90 		return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
91 	} else if (DISPLAY_VER(i915) >= 12) {
92 		u32 val = 0;
93 		int ret;
94 
95 		ret = snb_pcode_read(&i915->uncore,
96 				     GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
97 				     &val, NULL);
98 		if (ret) {
99 			drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
100 			return 0;
101 		}
102 
103 		return val;
104 	} else if (DISPLAY_VER(i915) == 11) {
105 		return 10;
106 	} else if (HAS_SAGV(i915)) {
107 		return 30;
108 	} else {
109 		return 0;
110 	}
111 }
112 
113 static void intel_sagv_init(struct drm_i915_private *i915)
114 {
115 	if (!HAS_SAGV(i915))
116 		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
117 
118 	/*
119 	 * Probe to see if we have working SAGV control.
120 	 * For icl+ this was already determined by intel_bw_init_hw().
121 	 */
122 	if (DISPLAY_VER(i915) < 11)
123 		skl_sagv_disable(i915);
124 
125 	drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
126 
127 	i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
128 
129 	drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
130 		    str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
131 
132 	/* avoid overflow when adding with wm0 latency/etc. */
133 	if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
134 		     "Excessive SAGV block time %u, ignoring\n",
135 		     i915->display.sagv.block_time_us))
136 		i915->display.sagv.block_time_us = 0;
137 
138 	if (!intel_has_sagv(i915))
139 		i915->display.sagv.block_time_us = 0;
140 }
141 
142 /*
143  * SAGV dynamically adjusts the system agent voltage and clock frequencies
144  * depending on power and performance requirements. The display engine access
145  * to system memory is blocked during the adjustment time. Because of the
146  * blocking time, having this enabled can cause full system hangs and/or pipe
147  * underruns if we don't meet all of the following requirements:
148  *
149  *  - <= 1 pipe enabled
150  *  - All planes can enable watermarks for latencies >= SAGV engine block time
151  *  - We're not using an interlaced display configuration
152  */
153 static void skl_sagv_enable(struct drm_i915_private *i915)
154 {
155 	int ret;
156 
157 	if (!intel_has_sagv(i915))
158 		return;
159 
160 	if (i915->display.sagv.status == I915_SAGV_ENABLED)
161 		return;
162 
163 	drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
164 	ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
165 			      GEN9_SAGV_ENABLE);
166 
167 	/* We don't need to wait for SAGV when enabling */
168 
169 	/*
170 	 * Some skl systems, pre-release machines in particular,
171 	 * don't actually have SAGV.
172 	 */
173 	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
174 		drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
175 		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
176 		return;
177 	} else if (ret < 0) {
178 		drm_err(&i915->drm, "Failed to enable SAGV\n");
179 		return;
180 	}
181 
182 	i915->display.sagv.status = I915_SAGV_ENABLED;
183 }
184 
185 static void skl_sagv_disable(struct drm_i915_private *i915)
186 {
187 	int ret;
188 
189 	if (!intel_has_sagv(i915))
190 		return;
191 
192 	if (i915->display.sagv.status == I915_SAGV_DISABLED)
193 		return;
194 
195 	drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
196 	/* bspec says to keep retrying for at least 1 ms */
197 	ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
198 				GEN9_SAGV_DISABLE,
199 				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
200 				1);
201 	/*
202 	 * Some skl systems, pre-release machines in particular,
203 	 * don't actually have SAGV.
204 	 */
205 	if (IS_SKYLAKE(i915) && ret == -ENXIO) {
206 		drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
207 		i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
208 		return;
209 	} else if (ret < 0) {
210 		drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
211 		return;
212 	}
213 
214 	i915->display.sagv.status = I915_SAGV_DISABLED;
215 }
216 
217 static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
218 {
219 	struct drm_i915_private *i915 = to_i915(state->base.dev);
220 	const struct intel_bw_state *new_bw_state =
221 		intel_atomic_get_new_bw_state(state);
222 
223 	if (!new_bw_state)
224 		return;
225 
226 	if (!intel_can_enable_sagv(i915, new_bw_state))
227 		skl_sagv_disable(i915);
228 }
229 
230 static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
231 {
232 	struct drm_i915_private *i915 = to_i915(state->base.dev);
233 	const struct intel_bw_state *new_bw_state =
234 		intel_atomic_get_new_bw_state(state);
235 
236 	if (!new_bw_state)
237 		return;
238 
239 	if (intel_can_enable_sagv(i915, new_bw_state))
240 		skl_sagv_enable(i915);
241 }
242 
243 static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
244 {
245 	struct drm_i915_private *i915 = to_i915(state->base.dev);
246 	const struct intel_bw_state *old_bw_state =
247 		intel_atomic_get_old_bw_state(state);
248 	const struct intel_bw_state *new_bw_state =
249 		intel_atomic_get_new_bw_state(state);
250 	u16 old_mask, new_mask;
251 
252 	if (!new_bw_state)
253 		return;
254 
255 	old_mask = old_bw_state->qgv_points_mask;
256 	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
257 
258 	if (old_mask == new_mask)
259 		return;
260 
261 	WARN_ON(!new_bw_state->base.changed);
262 
263 	drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
264 		    old_mask, new_mask);
265 
266 	/*
267 	 * Restrict required qgv points before updating the configuration.
268 	 * According to BSpec we can't mask and unmask qgv points at the same
269 	 * time. Also masking should be done before updating the configuration
270 	 * and unmasking afterwards.
271 	 */
272 	icl_pcode_restrict_qgv_points(i915, new_mask);
273 }
274 
275 static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
276 {
277 	struct drm_i915_private *i915 = to_i915(state->base.dev);
278 	const struct intel_bw_state *old_bw_state =
279 		intel_atomic_get_old_bw_state(state);
280 	const struct intel_bw_state *new_bw_state =
281 		intel_atomic_get_new_bw_state(state);
282 	u16 old_mask, new_mask;
283 
284 	if (!new_bw_state)
285 		return;
286 
287 	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
288 	new_mask = new_bw_state->qgv_points_mask;
289 
290 	if (old_mask == new_mask)
291 		return;
292 
293 	WARN_ON(!new_bw_state->base.changed);
294 
295 	drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
296 		    old_mask, new_mask);
297 
298 	/*
299 	 * Allow required qgv points after updating the configuration.
300 	 * According to BSpec we can't mask and unmask qgv points at the same
301 	 * time. Also masking should be done before updating the configuration
302 	 * and unmasking afterwards.
303 	 */
304 	icl_pcode_restrict_qgv_points(i915, new_mask);
305 }
306 
307 void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
308 {
309 	struct drm_i915_private *i915 = to_i915(state->base.dev);
310 
311 	/*
312 	 * Just return if we can't control SAGV or don't have it.
313 	 * This is different from the situation where we have SAGV but can't
314 	 * afford it due to DBuf limitations - if SAGV is completely disabled
315 	 * in the BIOS, we are not even allowed to send a PCode request, as it
316 	 * would throw an error. So we have to check it here.
317 	 */
318 	if (!intel_has_sagv(i915))
319 		return;
320 
321 	if (DISPLAY_VER(i915) >= 11)
322 		icl_sagv_pre_plane_update(state);
323 	else
324 		skl_sagv_pre_plane_update(state);
325 }
326 
327 void intel_sagv_post_plane_update(struct intel_atomic_state *state)
328 {
329 	struct drm_i915_private *i915 = to_i915(state->base.dev);
330 
331 	/*
332 	 * Just return if we can't control SAGV or don't have it.
333 	 * This is different from the situation where we have SAGV but can't
334 	 * afford it due to DBuf limitations - if SAGV is completely disabled
335 	 * in the BIOS, we are not even allowed to send a PCode request, as it
336 	 * would throw an error. So we have to check it here.
337 	 */
338 	if (!intel_has_sagv(i915))
339 		return;
340 
341 	if (DISPLAY_VER(i915) >= 11)
342 		icl_sagv_post_plane_update(state);
343 	else
344 		skl_sagv_post_plane_update(state);
345 }
346 
347 static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
348 {
349 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
350 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
351 	enum plane_id plane_id;
352 	int max_level = INT_MAX;
353 
354 	if (!intel_has_sagv(i915))
355 		return false;
356 
357 	if (!crtc_state->hw.active)
358 		return true;
359 
360 	if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
361 		return false;
362 
363 	for_each_plane_id_on_crtc(crtc, plane_id) {
364 		const struct skl_plane_wm *wm =
365 			&crtc_state->wm.skl.optimal.planes[plane_id];
366 		int level;
367 
368 		/* Skip this plane if it's not enabled */
369 		if (!wm->wm[0].enable)
370 			continue;
371 
372 		/* Find the highest enabled wm level for this plane */
373 		for (level = i915->display.wm.num_levels - 1;
374 		     !wm->wm[level].enable; --level)
375 		     { }
376 
377 		/* Highest common enabled wm level for all planes */
378 		max_level = min(level, max_level);
379 	}
380 
381 	/* No enabled planes? */
382 	if (max_level == INT_MAX)
383 		return true;
384 
385 	for_each_plane_id_on_crtc(crtc, plane_id) {
386 		const struct skl_plane_wm *wm =
387 			&crtc_state->wm.skl.optimal.planes[plane_id];
388 
389 		/*
390 		 * All enabled planes must have enabled a common wm level that
391 		 * can tolerate memory latencies higher than sagv_block_time_us
392 		 */
393 		if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
394 			return false;
395 	}
396 
397 	return true;
398 }
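
/*
 * Editorial worked example (illustrative): in the function above, if plane 1
 * has wm levels 0-5 enabled and plane 2 only levels 0-3, the per-plane loop
 * yields max_level = 3, i.e. the highest wm level enabled on all planes.
 * SAGV is then allowed only if wm[3].can_sagv is set for every enabled plane,
 * meaning that common level tolerates at least the SAGV block time.
 */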
399 
400 static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
401 {
402 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
403 	enum plane_id plane_id;
404 
405 	if (!crtc_state->hw.active)
406 		return true;
407 
408 	for_each_plane_id_on_crtc(crtc, plane_id) {
409 		const struct skl_plane_wm *wm =
410 			&crtc_state->wm.skl.optimal.planes[plane_id];
411 
412 		if (wm->wm[0].enable && !wm->sagv.wm0.enable)
413 			return false;
414 	}
415 
416 	return true;
417 }
418 
419 static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
420 {
421 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
422 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
423 
424 	if (!i915->display.params.enable_sagv)
425 		return false;
426 
427 	if (DISPLAY_VER(i915) >= 12)
428 		return tgl_crtc_can_enable_sagv(crtc_state);
429 	else
430 		return skl_crtc_can_enable_sagv(crtc_state);
431 }
432 
433 bool intel_can_enable_sagv(struct drm_i915_private *i915,
434 			   const struct intel_bw_state *bw_state)
435 {
436 	if (DISPLAY_VER(i915) < 11 &&
437 	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
438 		return false;
439 
440 	return bw_state->pipe_sagv_reject == 0;
441 }
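
/*
 * Editorial example (illustrative): on a pre-ICL platform with pipes A and B
 * both active, active_pipes = 0x3 is not a power of two, so the check above
 * rejects SAGV regardless of pipe_sagv_reject - matching the "<= 1 pipe
 * enabled" requirement documented earlier in this file.
 */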
442 
443 static int intel_compute_sagv_mask(struct intel_atomic_state *state)
444 {
445 	struct drm_i915_private *i915 = to_i915(state->base.dev);
446 	int ret;
447 	struct intel_crtc *crtc;
448 	struct intel_crtc_state *new_crtc_state;
449 	struct intel_bw_state *new_bw_state = NULL;
450 	const struct intel_bw_state *old_bw_state = NULL;
451 	int i;
452 
453 	for_each_new_intel_crtc_in_state(state, crtc,
454 					 new_crtc_state, i) {
455 		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
456 
457 		new_bw_state = intel_atomic_get_bw_state(state);
458 		if (IS_ERR(new_bw_state))
459 			return PTR_ERR(new_bw_state);
460 
461 		old_bw_state = intel_atomic_get_old_bw_state(state);
462 
463 		/*
464 		 * We store use_sagv_wm in the crtc state rather than relying on
465 		 * that bw state since we have no convenient way to get at the
466 		 * latter from the plane commit hooks (especially in the legacy
467 		 * cursor case).
468 		 *
469 		 * drm_atomic_check_only() gets upset if we pull more crtcs
470 		 * into the state, so we have to calculate this based on the
471 		 * individual intel_crtc_can_enable_sagv() rather than
472 		 * the overall intel_can_enable_sagv(). Otherwise the
473 		 * crtcs not included in the commit would not switch to the
474 		 * SAGV watermarks when we are about to enable SAGV, and that
475 		 * would lead to underruns. This does mean extra power draw
476 		 * when only a subset of the crtcs are blocking SAGV as the
477 		 * other crtcs can't be allowed to use the more optimal
478 		 * normal (ie. non-SAGV) watermarks.
479 		 */
480 		pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
481 			DISPLAY_VER(i915) >= 12 &&
482 			intel_crtc_can_enable_sagv(new_crtc_state);
483 
484 		if (intel_crtc_can_enable_sagv(new_crtc_state))
485 			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
486 		else
487 			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
488 	}
489 
490 	if (!new_bw_state)
491 		return 0;
492 
493 	new_bw_state->active_pipes =
494 		intel_calc_active_pipes(state, old_bw_state->active_pipes);
495 
496 	if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
497 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
498 		if (ret)
499 			return ret;
500 	}
501 
502 	if (intel_can_enable_sagv(i915, new_bw_state) !=
503 	    intel_can_enable_sagv(i915, old_bw_state)) {
504 		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
505 		if (ret)
506 			return ret;
507 	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
508 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
509 		if (ret)
510 			return ret;
511 	}
512 
513 	return 0;
514 }
515 
516 static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
517 			      u16 start, u16 end)
518 {
519 	entry->start = start;
520 	entry->end = end;
521 
522 	return end;
523 }
524 
525 static int intel_dbuf_slice_size(struct drm_i915_private *i915)
526 {
527 	return DISPLAY_INFO(i915)->dbuf.size /
528 		hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask);
529 }
530 
531 static void
532 skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
533 			 struct skl_ddb_entry *ddb)
534 {
535 	int slice_size = intel_dbuf_slice_size(i915);
536 
537 	if (!slice_mask) {
538 		ddb->start = 0;
539 		ddb->end = 0;
540 		return;
541 	}
542 
543 	ddb->start = (ffs(slice_mask) - 1) * slice_size;
544 	ddb->end = fls(slice_mask) * slice_size;
545 
546 	WARN_ON(ddb->start >= ddb->end);
547 	WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size);
548 }
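
/*
 * Editorial worked example (illustrative, assuming 1024-block slices):
 * slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) gives ffs(mask) - 1 = 0 and
 * fls(mask) = 2, so the entry above spans blocks [0, 2048); a mask of just
 * BIT(DBUF_S2) would give [1024, 2048).
 */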
549 
550 static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
551 {
552 	struct skl_ddb_entry ddb;
553 
554 	if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
555 		slice_mask = BIT(DBUF_S1);
556 	else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
557 		slice_mask = BIT(DBUF_S3);
558 
559 	skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
560 
561 	return ddb.start;
562 }
563 
564 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
565 			    const struct skl_ddb_entry *entry)
566 {
567 	int slice_size = intel_dbuf_slice_size(i915);
568 	enum dbuf_slice start_slice, end_slice;
569 	u8 slice_mask = 0;
570 
571 	if (!skl_ddb_entry_size(entry))
572 		return 0;
573 
574 	start_slice = entry->start / slice_size;
575 	end_slice = (entry->end - 1) / slice_size;
576 
577 	/*
578 	 * A per-plane DDB entry can, in the worst case, span multiple slices,
579 	 * but a single entry is always contiguous.
580 	 */
581 	while (start_slice <= end_slice) {
582 		slice_mask |= BIT(start_slice);
583 		start_slice++;
584 	}
585 
586 	return slice_mask;
587 }
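
/*
 * Editorial worked example (illustrative): with the same 1024-block slices,
 * the inverse mapping above turns the entry [0, 2048) back into
 * BIT(DBUF_S1) | BIT(DBUF_S2), and the entry [1024, 2048) into
 * BIT(DBUF_S2) alone.
 */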
588 
589 static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
590 {
591 	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
592 	int hdisplay, vdisplay;
593 
594 	if (!crtc_state->hw.active)
595 		return 0;
596 
597 	/*
598 	 * The watermark/DDB requirement depends heavily on the width of the
599 	 * framebuffer, so instead of allocating DDB equally among pipes,
600 	 * distribute it based on the resolution/width of each display.
601 	 */
602 	drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
603 
604 	return hdisplay;
605 }
606 
607 static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
608 				    enum pipe for_pipe,
609 				    unsigned int *weight_start,
610 				    unsigned int *weight_end,
611 				    unsigned int *weight_total)
612 {
613 	struct drm_i915_private *i915 =
614 		to_i915(dbuf_state->base.state->base.dev);
615 	enum pipe pipe;
616 
617 	*weight_start = 0;
618 	*weight_end = 0;
619 	*weight_total = 0;
620 
621 	for_each_pipe(i915, pipe) {
622 		int weight = dbuf_state->weight[pipe];
623 
624 		/*
625 		 * Do not account for pipes using other slice sets.
626 		 * Luckily, as of the current BSpec, slice sets do not partially
627 		 * intersect (pipes share either the same single slice or the same
628 		 * slice set, i.e. no partial intersection), so it is enough to
629 		 * check for equality for now.
630 		 */
631 		if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
632 			continue;
633 
634 		*weight_total += weight;
635 		if (pipe < for_pipe) {
636 			*weight_start += weight;
637 			*weight_end += weight;
638 		} else if (pipe == for_pipe) {
639 			*weight_end += weight;
640 		}
641 	}
642 }
643 
644 static int
645 skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
646 {
647 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
648 	unsigned int weight_total, weight_start, weight_end;
649 	const struct intel_dbuf_state *old_dbuf_state =
650 		intel_atomic_get_old_dbuf_state(state);
651 	struct intel_dbuf_state *new_dbuf_state =
652 		intel_atomic_get_new_dbuf_state(state);
653 	struct intel_crtc_state *crtc_state;
654 	struct skl_ddb_entry ddb_slices;
655 	enum pipe pipe = crtc->pipe;
656 	unsigned int mbus_offset = 0;
657 	u32 ddb_range_size;
658 	u32 dbuf_slice_mask;
659 	u32 start, end;
660 	int ret;
661 
662 	if (new_dbuf_state->weight[pipe] == 0) {
663 		skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
664 		goto out;
665 	}
666 
667 	dbuf_slice_mask = new_dbuf_state->slices[pipe];
668 
669 	skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
670 	mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
671 	ddb_range_size = skl_ddb_entry_size(&ddb_slices);
672 
673 	intel_crtc_dbuf_weights(new_dbuf_state, pipe,
674 				&weight_start, &weight_end, &weight_total);
675 
676 	start = ddb_range_size * weight_start / weight_total;
677 	end = ddb_range_size * weight_end / weight_total;
678 
679 	skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
680 			   ddb_slices.start - mbus_offset + start,
681 			   ddb_slices.start - mbus_offset + end);
682 
683 out:
684 	if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
685 	    skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
686 				&new_dbuf_state->ddb[pipe]))
687 		return 0;
688 
689 	ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
690 	if (ret)
691 		return ret;
692 
693 	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
694 	if (IS_ERR(crtc_state))
695 		return PTR_ERR(crtc_state);
696 
697 	/*
698 	 * Used for checking overlaps, so we need absolute
699 	 * offsets instead of MBUS relative offsets.
700 	 */
701 	crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
702 	crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
703 
704 	drm_dbg_kms(&i915->drm,
705 		    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
706 		    crtc->base.base.id, crtc->base.name,
707 		    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
708 		    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
709 		    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
710 		    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
711 
712 	return 0;
713 }
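
/*
 * Editorial worked example (illustrative): assume pipes A, B and C share one
 * slice set with hdisplay-based weights 1920, 2560 and 1280. For pipe B,
 * intel_crtc_dbuf_weights() returns weight_start = 1920, weight_end = 4480
 * and weight_total = 5760, so with a 1536-block DDB range pipe B is assigned
 * [1536 * 1920 / 5760, 1536 * 4480 / 5760) = [512, 1194) within that range.
 */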
714 
715 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
716 				 int width, const struct drm_format_info *format,
717 				 u64 modifier, unsigned int rotation,
718 				 u32 plane_pixel_rate, struct skl_wm_params *wp,
719 				 int color_plane);
720 
721 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
722 				 struct intel_plane *plane,
723 				 int level,
724 				 unsigned int latency,
725 				 const struct skl_wm_params *wp,
726 				 const struct skl_wm_level *result_prev,
727 				 struct skl_wm_level *result /* out */);
728 
729 static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
730 				   const struct skl_wm_params *wp)
731 {
732 	unsigned int latency = i915->display.wm.skl_latency[level];
733 
734 	if (latency == 0)
735 		return 0;
736 
737 	/*
738 	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
739 	 * Display WA #1141: kbl,cfl
740 	 */
741 	if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
742 	    skl_watermark_ipc_enabled(i915))
743 		latency += 4;
744 
745 	if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
746 		latency += 15;
747 
748 	return latency;
749 }
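
/*
 * Editorial worked example (illustrative): with a raw level latency of 10us,
 * a KBL/CFL/CML part with IPC enabled reports 14us here (Display WA #1141),
 * and a display version 9 platform adds a further 15us for x-tiled planes,
 * e.g. 25us on SKL or 29us on KBL with IPC enabled.
 */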
750 
751 static unsigned int
752 skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
753 		      int num_active)
754 {
755 	struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
756 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
757 	struct skl_wm_level wm = {};
758 	int ret, min_ddb_alloc = 0;
759 	struct skl_wm_params wp;
760 	int level;
761 
762 	ret = skl_compute_wm_params(crtc_state, 256,
763 				    drm_format_info(DRM_FORMAT_ARGB8888),
764 				    DRM_FORMAT_MOD_LINEAR,
765 				    DRM_MODE_ROTATE_0,
766 				    crtc_state->pixel_rate, &wp, 0);
767 	drm_WARN_ON(&i915->drm, ret);
768 
769 	for (level = 0; level < i915->display.wm.num_levels; level++) {
770 		unsigned int latency = skl_wm_latency(i915, level, &wp);
771 
772 		skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
773 		if (wm.min_ddb_alloc == U16_MAX)
774 			break;
775 
776 		min_ddb_alloc = wm.min_ddb_alloc;
777 	}
778 
779 	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
780 }
781 
782 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
783 {
784 	skl_ddb_entry_init(entry,
785 			   REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
786 			   REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
787 	if (entry->end)
788 		entry->end++;
789 }
790 
791 static void
792 skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
793 			   const enum pipe pipe,
794 			   const enum plane_id plane_id,
795 			   struct skl_ddb_entry *ddb,
796 			   struct skl_ddb_entry *ddb_y)
797 {
798 	u32 val;
799 
800 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
801 	if (plane_id == PLANE_CURSOR) {
802 		val = intel_de_read(i915, CUR_BUF_CFG(pipe));
803 		skl_ddb_entry_init_from_hw(ddb, val);
804 		return;
805 	}
806 
807 	val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
808 	skl_ddb_entry_init_from_hw(ddb, val);
809 
810 	if (DISPLAY_VER(i915) >= 11)
811 		return;
812 
813 	val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
814 	skl_ddb_entry_init_from_hw(ddb_y, val);
815 }
816 
817 static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
818 				      struct skl_ddb_entry *ddb,
819 				      struct skl_ddb_entry *ddb_y)
820 {
821 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
822 	enum intel_display_power_domain power_domain;
823 	enum pipe pipe = crtc->pipe;
824 	intel_wakeref_t wakeref;
825 	enum plane_id plane_id;
826 
827 	power_domain = POWER_DOMAIN_PIPE(pipe);
828 	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
829 	if (!wakeref)
830 		return;
831 
832 	for_each_plane_id_on_crtc(crtc, plane_id)
833 		skl_ddb_get_hw_plane_state(i915, pipe,
834 					   plane_id,
835 					   &ddb[plane_id],
836 					   &ddb_y[plane_id]);
837 
838 	intel_display_power_put(i915, power_domain, wakeref);
839 }
840 
841 struct dbuf_slice_conf_entry {
842 	u8 active_pipes;
843 	u8 dbuf_mask[I915_MAX_PIPES];
844 	bool join_mbus;
845 };
846 
847 /*
848  * Table taken from Bspec 12716
849  * Pipes do have some preferred DBuf slice affinity,
850  * plus there are some hardcoded requirements on how
851  * those should be distributed for multipipe scenarios.
852  * With more DBuf slices the algorithm gets even messier and less
853  * readable, so we decided to use a table taken almost as-is from
854  * BSpec itself - that way it is at least easier to compare, change
855  * and check.
856  */
857 static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
858 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
859 {
860 	{
861 		.active_pipes = BIT(PIPE_A),
862 		.dbuf_mask = {
863 			[PIPE_A] = BIT(DBUF_S1),
864 		},
865 	},
866 	{
867 		.active_pipes = BIT(PIPE_B),
868 		.dbuf_mask = {
869 			[PIPE_B] = BIT(DBUF_S1),
870 		},
871 	},
872 	{
873 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
874 		.dbuf_mask = {
875 			[PIPE_A] = BIT(DBUF_S1),
876 			[PIPE_B] = BIT(DBUF_S2),
877 		},
878 	},
879 	{
880 		.active_pipes = BIT(PIPE_C),
881 		.dbuf_mask = {
882 			[PIPE_C] = BIT(DBUF_S2),
883 		},
884 	},
885 	{
886 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
887 		.dbuf_mask = {
888 			[PIPE_A] = BIT(DBUF_S1),
889 			[PIPE_C] = BIT(DBUF_S2),
890 		},
891 	},
892 	{
893 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
894 		.dbuf_mask = {
895 			[PIPE_B] = BIT(DBUF_S1),
896 			[PIPE_C] = BIT(DBUF_S2),
897 		},
898 	},
899 	{
900 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
901 		.dbuf_mask = {
902 			[PIPE_A] = BIT(DBUF_S1),
903 			[PIPE_B] = BIT(DBUF_S1),
904 			[PIPE_C] = BIT(DBUF_S2),
905 		},
906 	},
907 	{}
908 };
909 
910 /*
911  * Table taken from Bspec 49255
912  * Pipes do have some preferred DBuf slice affinity,
913  * plus there are some hardcoded requirements on how
914  * those should be distributed for multipipe scenarios.
915  * With more DBuf slices the algorithm gets even messier and less
916  * readable, so we decided to use a table taken almost as-is from
917  * BSpec itself - that way it is at least easier to compare, change
918  * and check.
919  */
920 static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
921 /* Autogenerated with igt/tools/intel_dbuf_map tool: */
922 {
923 	{
924 		.active_pipes = BIT(PIPE_A),
925 		.dbuf_mask = {
926 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
927 		},
928 	},
929 	{
930 		.active_pipes = BIT(PIPE_B),
931 		.dbuf_mask = {
932 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
933 		},
934 	},
935 	{
936 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
937 		.dbuf_mask = {
938 			[PIPE_A] = BIT(DBUF_S2),
939 			[PIPE_B] = BIT(DBUF_S1),
940 		},
941 	},
942 	{
943 		.active_pipes = BIT(PIPE_C),
944 		.dbuf_mask = {
945 			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
946 		},
947 	},
948 	{
949 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
950 		.dbuf_mask = {
951 			[PIPE_A] = BIT(DBUF_S1),
952 			[PIPE_C] = BIT(DBUF_S2),
953 		},
954 	},
955 	{
956 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
957 		.dbuf_mask = {
958 			[PIPE_B] = BIT(DBUF_S1),
959 			[PIPE_C] = BIT(DBUF_S2),
960 		},
961 	},
962 	{
963 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
964 		.dbuf_mask = {
965 			[PIPE_A] = BIT(DBUF_S1),
966 			[PIPE_B] = BIT(DBUF_S1),
967 			[PIPE_C] = BIT(DBUF_S2),
968 		},
969 	},
970 	{
971 		.active_pipes = BIT(PIPE_D),
972 		.dbuf_mask = {
973 			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
974 		},
975 	},
976 	{
977 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
978 		.dbuf_mask = {
979 			[PIPE_A] = BIT(DBUF_S1),
980 			[PIPE_D] = BIT(DBUF_S2),
981 		},
982 	},
983 	{
984 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
985 		.dbuf_mask = {
986 			[PIPE_B] = BIT(DBUF_S1),
987 			[PIPE_D] = BIT(DBUF_S2),
988 		},
989 	},
990 	{
991 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
992 		.dbuf_mask = {
993 			[PIPE_A] = BIT(DBUF_S1),
994 			[PIPE_B] = BIT(DBUF_S1),
995 			[PIPE_D] = BIT(DBUF_S2),
996 		},
997 	},
998 	{
999 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1000 		.dbuf_mask = {
1001 			[PIPE_C] = BIT(DBUF_S1),
1002 			[PIPE_D] = BIT(DBUF_S2),
1003 		},
1004 	},
1005 	{
1006 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1007 		.dbuf_mask = {
1008 			[PIPE_A] = BIT(DBUF_S1),
1009 			[PIPE_C] = BIT(DBUF_S2),
1010 			[PIPE_D] = BIT(DBUF_S2),
1011 		},
1012 	},
1013 	{
1014 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1015 		.dbuf_mask = {
1016 			[PIPE_B] = BIT(DBUF_S1),
1017 			[PIPE_C] = BIT(DBUF_S2),
1018 			[PIPE_D] = BIT(DBUF_S2),
1019 		},
1020 	},
1021 	{
1022 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1023 		.dbuf_mask = {
1024 			[PIPE_A] = BIT(DBUF_S1),
1025 			[PIPE_B] = BIT(DBUF_S1),
1026 			[PIPE_C] = BIT(DBUF_S2),
1027 			[PIPE_D] = BIT(DBUF_S2),
1028 		},
1029 	},
1030 	{}
1031 };
1032 
1033 static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
1034 	{
1035 		.active_pipes = BIT(PIPE_A),
1036 		.dbuf_mask = {
1037 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1038 		},
1039 	},
1040 	{
1041 		.active_pipes = BIT(PIPE_B),
1042 		.dbuf_mask = {
1043 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1044 		},
1045 	},
1046 	{
1047 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
1048 		.dbuf_mask = {
1049 			[PIPE_A] = BIT(DBUF_S1),
1050 			[PIPE_B] = BIT(DBUF_S2),
1051 		},
1052 	},
1053 	{
1054 		.active_pipes = BIT(PIPE_C),
1055 		.dbuf_mask = {
1056 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1057 		},
1058 	},
1059 	{
1060 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
1061 		.dbuf_mask = {
1062 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1063 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1064 		},
1065 	},
1066 	{
1067 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
1068 		.dbuf_mask = {
1069 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1070 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1071 		},
1072 	},
1073 	{
1074 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
1075 		.dbuf_mask = {
1076 			[PIPE_A] = BIT(DBUF_S1),
1077 			[PIPE_B] = BIT(DBUF_S2),
1078 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1079 		},
1080 	},
1081 	{
1082 		.active_pipes = BIT(PIPE_D),
1083 		.dbuf_mask = {
1084 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1085 		},
1086 	},
1087 	{
1088 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
1089 		.dbuf_mask = {
1090 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1091 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1092 		},
1093 	},
1094 	{
1095 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
1096 		.dbuf_mask = {
1097 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1098 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1099 		},
1100 	},
1101 	{
1102 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
1103 		.dbuf_mask = {
1104 			[PIPE_A] = BIT(DBUF_S1),
1105 			[PIPE_B] = BIT(DBUF_S2),
1106 			[PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
1107 		},
1108 	},
1109 	{
1110 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1111 		.dbuf_mask = {
1112 			[PIPE_C] = BIT(DBUF_S3),
1113 			[PIPE_D] = BIT(DBUF_S4),
1114 		},
1115 	},
1116 	{
1117 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1118 		.dbuf_mask = {
1119 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1120 			[PIPE_C] = BIT(DBUF_S3),
1121 			[PIPE_D] = BIT(DBUF_S4),
1122 		},
1123 	},
1124 	{
1125 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1126 		.dbuf_mask = {
1127 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
1128 			[PIPE_C] = BIT(DBUF_S3),
1129 			[PIPE_D] = BIT(DBUF_S4),
1130 		},
1131 	},
1132 	{
1133 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1134 		.dbuf_mask = {
1135 			[PIPE_A] = BIT(DBUF_S1),
1136 			[PIPE_B] = BIT(DBUF_S2),
1137 			[PIPE_C] = BIT(DBUF_S3),
1138 			[PIPE_D] = BIT(DBUF_S4),
1139 		},
1140 	},
1141 	{}
1142 };
1143 
1144 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
1145 	/*
1146 	 * Keep the join_mbus cases first so check_mbus_joined()
1147 	 * will prefer them over the !join_mbus cases.
1148 	 */
1149 	{
1150 		.active_pipes = BIT(PIPE_A),
1151 		.dbuf_mask = {
1152 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1153 		},
1154 		.join_mbus = true,
1155 	},
1156 	{
1157 		.active_pipes = BIT(PIPE_B),
1158 		.dbuf_mask = {
1159 			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
1160 		},
1161 		.join_mbus = true,
1162 	},
1163 	{
1164 		.active_pipes = BIT(PIPE_A),
1165 		.dbuf_mask = {
1166 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1167 		},
1168 		.join_mbus = false,
1169 	},
1170 	{
1171 		.active_pipes = BIT(PIPE_B),
1172 		.dbuf_mask = {
1173 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1174 		},
1175 		.join_mbus = false,
1176 	},
1177 	{
1178 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
1179 		.dbuf_mask = {
1180 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1181 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1182 		},
1183 	},
1184 	{
1185 		.active_pipes = BIT(PIPE_C),
1186 		.dbuf_mask = {
1187 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1188 		},
1189 	},
1190 	{
1191 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
1192 		.dbuf_mask = {
1193 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1194 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1195 		},
1196 	},
1197 	{
1198 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
1199 		.dbuf_mask = {
1200 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1201 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1202 		},
1203 	},
1204 	{
1205 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
1206 		.dbuf_mask = {
1207 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1208 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1209 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1210 		},
1211 	},
1212 	{
1213 		.active_pipes = BIT(PIPE_D),
1214 		.dbuf_mask = {
1215 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1216 		},
1217 	},
1218 	{
1219 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
1220 		.dbuf_mask = {
1221 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1222 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1223 		},
1224 	},
1225 	{
1226 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
1227 		.dbuf_mask = {
1228 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1229 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1230 		},
1231 	},
1232 	{
1233 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
1234 		.dbuf_mask = {
1235 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1236 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1237 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1238 		},
1239 	},
1240 	{
1241 		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
1242 		.dbuf_mask = {
1243 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1244 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1245 		},
1246 	},
1247 	{
1248 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
1249 		.dbuf_mask = {
1250 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1251 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1252 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1253 		},
1254 	},
1255 	{
1256 		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1257 		.dbuf_mask = {
1258 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1259 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1260 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1261 		},
1262 	},
1263 	{
1264 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
1265 		.dbuf_mask = {
1266 			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
1267 			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
1268 			[PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
1269 			[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
1270 		},
1271 	},
1272 	{}
1273 
1274 };
1275 
1276 static bool check_mbus_joined(u8 active_pipes,
1277 			      const struct dbuf_slice_conf_entry *dbuf_slices)
1278 {
1279 	int i;
1280 
1281 	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1282 		if (dbuf_slices[i].active_pipes == active_pipes)
1283 			return dbuf_slices[i].join_mbus;
1284 	}
1285 	return false;
1286 }
1287 
1288 static bool adlp_check_mbus_joined(u8 active_pipes)
1289 {
1290 	return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
1291 }
1292 
1293 static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
1294 			      const struct dbuf_slice_conf_entry *dbuf_slices)
1295 {
1296 	int i;
1297 
1298 	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
1299 		if (dbuf_slices[i].active_pipes == active_pipes &&
1300 		    dbuf_slices[i].join_mbus == join_mbus)
1301 			return dbuf_slices[i].dbuf_mask[pipe];
1302 	}
1303 	return 0;
1304 }
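
/*
 * Editorial usage example (illustrative): with the TGL table above,
 * compute_dbuf_slices(PIPE_A, BIT(PIPE_A) | BIT(PIPE_B), false,
 * tgl_allowed_dbufs) returns BIT(DBUF_S2), while the same call for PIPE_B
 * returns BIT(DBUF_S1), matching the two-pipe entry in that table.
 */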
1305 
1306 /*
1307  * This function finds an entry with the same enabled pipe configuration and
1308  * returns the corresponding DBuf slice mask as stated in BSpec for the
1309  * particular platform.
1310  */
1311 static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1312 {
1313 	/*
1314 	 * FIXME: For ICL this is still a bit unclear, as a previous BSpec
1315 	 * revision required calculating a "pipe ratio" to determine whether
1316 	 * one or two slices can be used for single pipe configurations, as an
1317 	 * additional constraint on top of the existing table.
1318 	 * However, based on more recent info it should not be a "pipe ratio"
1319 	 * but rather the ratio between pixel_rate and cdclk with additional
1320 	 * constants, so for now we use only the table until this is
1321 	 * clarified. This is also the reason why the crtc_state parameter is
1322 	 * still here - we will need it once those additional constraints
1323 	 * pop up.
1324 	 */
1325 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1326 				   icl_allowed_dbufs);
1327 }
1328 
1329 static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1330 {
1331 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1332 				   tgl_allowed_dbufs);
1333 }
1334 
1335 static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1336 {
1337 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1338 				   adlp_allowed_dbufs);
1339 }
1340 
1341 static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
1342 {
1343 	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
1344 				   dg2_allowed_dbufs);
1345 }
1346 
1347 static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
1348 {
1349 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1350 	enum pipe pipe = crtc->pipe;
1351 
1352 	if (IS_DG2(i915))
1353 		return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1354 	else if (DISPLAY_VER(i915) >= 13)
1355 		return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1356 	else if (DISPLAY_VER(i915) == 12)
1357 		return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1358 	else if (DISPLAY_VER(i915) == 11)
1359 		return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
1360 	/*
1361 	 * For anything else just return one slice for now.
1362 	 * This should be extended for other platforms.
1363 	 */
1364 	return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
1365 }
1366 
1367 static bool
1368 use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
1369 		     struct intel_plane *plane)
1370 {
1371 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
1372 
1373 	return DISPLAY_VER(i915) >= 13 &&
1374 	       crtc_state->uapi.async_flip &&
1375 	       plane->async_flip;
1376 }
1377 
1378 static u64
1379 skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
1380 {
1381 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1382 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1383 	enum plane_id plane_id;
1384 	u64 data_rate = 0;
1385 
1386 	for_each_plane_id_on_crtc(crtc, plane_id) {
1387 		if (plane_id == PLANE_CURSOR)
1388 			continue;
1389 
1390 		data_rate += crtc_state->rel_data_rate[plane_id];
1391 
1392 		if (DISPLAY_VER(i915) < 11)
1393 			data_rate += crtc_state->rel_data_rate_y[plane_id];
1394 	}
1395 
1396 	return data_rate;
1397 }
1398 
1399 const struct skl_wm_level *
1400 skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
1401 		   enum plane_id plane_id,
1402 		   int level)
1403 {
1404 	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1405 
1406 	if (level == 0 && pipe_wm->use_sagv_wm)
1407 		return &wm->sagv.wm0;
1408 
1409 	return &wm->wm[level];
1410 }
1411 
1412 const struct skl_wm_level *
1413 skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
1414 		   enum plane_id plane_id)
1415 {
1416 	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
1417 
1418 	if (pipe_wm->use_sagv_wm)
1419 		return &wm->sagv.trans_wm;
1420 
1421 	return &wm->trans_wm;
1422 }
1423 
1424 /*
1425  * We only disable the watermarks for each plane if
1426  * they exceed the ddb allocation of said plane. This
1427  * is done so that we don't end up touching cursor
1428  * watermarks needlessly when some other plane reduces
1429  * our max possible watermark level.
1430  *
1431  * Bspec has this to say about the PLANE_WM enable bit:
1432  * "All the watermarks at this level for all enabled
1433  *  planes must be enabled before the level will be used."
1434  * So this is actually safe to do.
1435  */
1436 static void
1437 skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
1438 {
1439 	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
1440 		memset(wm, 0, sizeof(*wm));
1441 }
1442 
1443 static void
1444 skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
1445 			const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
1446 {
1447 	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
1448 	    uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1449 		memset(wm, 0, sizeof(*wm));
1450 		memset(uv_wm, 0, sizeof(*uv_wm));
1451 	}
1452 }
1453 
1454 static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
1455 				const struct skl_plane_wm *wm)
1456 {
1457 	/*
1458 	 * Wa_1408961008:icl, ehl
1459 	 * Wa_14012656716:tgl, adl
1460 	 * Wa_14017887344:icl
1461 	 * Wa_14017868169:adl, tgl
1462 	 * Due to some power saving optimizations, different subsystems
1463 	 * like PSR might still use even disabled wm level registers for
1464 	 * "reference", so let's keep at least those values sane.
1465 	 * Considering the number of workarounds requiring us to do similar
1466 	 * things, it was decided to simply do this for all platforms; since
1467 	 * those wm levels are disabled, it isn't going to do any harm anyway.
1468 	 */
1469 	return level > 0 && !wm->wm[level].enable;
1470 }
1471 
1472 struct skl_plane_ddb_iter {
1473 	u64 data_rate;
1474 	u16 start, size;
1475 };
1476 
1477 static void
1478 skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
1479 		       struct skl_ddb_entry *ddb,
1480 		       const struct skl_wm_level *wm,
1481 		       u64 data_rate)
1482 {
1483 	u16 size, extra = 0;
1484 
1485 	if (data_rate) {
1486 		extra = min_t(u16, iter->size,
1487 			      DIV64_U64_ROUND_UP(iter->size * data_rate,
1488 						 iter->data_rate));
1489 		iter->size -= extra;
1490 		iter->data_rate -= data_rate;
1491 	}
1492 
1493 	/*
1494 	 * Keep ddb entry of all disabled planes explicitly zeroed
1495 	 * to avoid skl_ddb_add_affected_planes() adding them to
1496 	 * the state when other planes change their allocations.
1497 	 */
1498 	size = wm->min_ddb_alloc + extra;
1499 	if (size)
1500 		iter->start = skl_ddb_entry_init(ddb, iter->start,
1501 						 iter->start + size);
1502 }
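
/*
 * Editorial worked example (illustrative): with 100 leftover blocks
 * (iter->size), a remaining relative data rate of 4000 (iter->data_rate) and
 * a plane data rate of 1000, the plane above receives
 * extra = 100 * 1000 / 4000 = 25 blocks on top of its min_ddb_alloc, and
 * iter->size and iter->data_rate drop to 75 and 3000 for the remaining planes.
 */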
1503 
1504 static int
1505 skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
1506 			    struct intel_crtc *crtc)
1507 {
1508 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1509 	struct intel_crtc_state *crtc_state =
1510 		intel_atomic_get_new_crtc_state(state, crtc);
1511 	const struct intel_dbuf_state *dbuf_state =
1512 		intel_atomic_get_new_dbuf_state(state);
1513 	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
1514 	int num_active = hweight8(dbuf_state->active_pipes);
1515 	struct skl_plane_ddb_iter iter;
1516 	enum plane_id plane_id;
1517 	u16 cursor_size;
1518 	u32 blocks;
1519 	int level;
1520 
1521 	/* Clear the partitioning for disabled planes. */
1522 	memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
1523 	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
1524 
1525 	if (!crtc_state->hw.active)
1526 		return 0;
1527 
1528 	iter.start = alloc->start;
1529 	iter.size = skl_ddb_entry_size(alloc);
1530 	if (iter.size == 0)
1531 		return 0;
1532 
1533 	/* Allocate fixed number of blocks for cursor. */
1534 	cursor_size = skl_cursor_allocation(crtc_state, num_active);
1535 	iter.size -= cursor_size;
1536 	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
1537 			   alloc->end - cursor_size, alloc->end);
1538 
1539 	iter.data_rate = skl_total_relative_data_rate(crtc_state);
1540 
1541 	/*
1542 	 * Find the highest watermark level for which we can satisfy the block
1543 	 * requirement of active planes.
1544 	 */
1545 	for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
1546 		blocks = 0;
1547 		for_each_plane_id_on_crtc(crtc, plane_id) {
1548 			const struct skl_plane_wm *wm =
1549 				&crtc_state->wm.skl.optimal.planes[plane_id];
1550 
1551 			if (plane_id == PLANE_CURSOR) {
1552 				const struct skl_ddb_entry *ddb =
1553 					&crtc_state->wm.skl.plane_ddb[plane_id];
1554 
1555 				if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
1556 					drm_WARN_ON(&i915->drm,
1557 						    wm->wm[level].min_ddb_alloc != U16_MAX);
1558 					blocks = U32_MAX;
1559 					break;
1560 				}
1561 				continue;
1562 			}
1563 
1564 			blocks += wm->wm[level].min_ddb_alloc;
1565 			blocks += wm->uv_wm[level].min_ddb_alloc;
1566 		}
1567 
1568 		if (blocks <= iter.size) {
1569 			iter.size -= blocks;
1570 			break;
1571 		}
1572 	}
1573 
1574 	if (level < 0) {
1575 		drm_dbg_kms(&i915->drm,
1576 			    "Requested display configuration exceeds system DDB limitations");
1577 		drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
1578 			    blocks, iter.size);
1579 		return -EINVAL;
1580 	}
1581 
1582 	/* avoid the WARN later when we don't allocate any extra DDB */
1583 	if (iter.data_rate == 0)
1584 		iter.size = 0;
1585 
1586 	/*
1587 	 * Grant each plane the blocks it requires at the highest achievable
1588 	 * watermark level, plus an extra share of the leftover blocks
1589 	 * proportional to its relative data rate.
1590 	 */
1591 	for_each_plane_id_on_crtc(crtc, plane_id) {
1592 		struct skl_ddb_entry *ddb =
1593 			&crtc_state->wm.skl.plane_ddb[plane_id];
1594 		struct skl_ddb_entry *ddb_y =
1595 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
1596 		const struct skl_plane_wm *wm =
1597 			&crtc_state->wm.skl.optimal.planes[plane_id];
1598 
1599 		if (plane_id == PLANE_CURSOR)
1600 			continue;
1601 
1602 		if (DISPLAY_VER(i915) < 11 &&
1603 		    crtc_state->nv12_planes & BIT(plane_id)) {
1604 			skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
1605 					       crtc_state->rel_data_rate_y[plane_id]);
1606 			skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
1607 					       crtc_state->rel_data_rate[plane_id]);
1608 		} else {
1609 			skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
1610 					       crtc_state->rel_data_rate[plane_id]);
1611 		}
1612 	}
1613 	drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
1614 
1615 	/*
1616 	 * When we calculated watermark values we didn't know how high
1617 	 * of a level we'd actually be able to hit, so we just marked
1618 	 * all levels as "enabled."  Go back now and disable the ones
1619 	 * that aren't actually possible.
1620 	 */
1621 	for (level++; level < i915->display.wm.num_levels; level++) {
1622 		for_each_plane_id_on_crtc(crtc, plane_id) {
1623 			const struct skl_ddb_entry *ddb =
1624 				&crtc_state->wm.skl.plane_ddb[plane_id];
1625 			const struct skl_ddb_entry *ddb_y =
1626 				&crtc_state->wm.skl.plane_ddb_y[plane_id];
1627 			struct skl_plane_wm *wm =
1628 				&crtc_state->wm.skl.optimal.planes[plane_id];
1629 
1630 			if (DISPLAY_VER(i915) < 11 &&
1631 			    crtc_state->nv12_planes & BIT(plane_id))
1632 				skl_check_nv12_wm_level(&wm->wm[level],
1633 							&wm->uv_wm[level],
1634 							ddb_y, ddb);
1635 			else
1636 				skl_check_wm_level(&wm->wm[level], ddb);
1637 
1638 			if (skl_need_wm_copy_wa(i915, level, wm)) {
1639 				wm->wm[level].blocks = wm->wm[level - 1].blocks;
1640 				wm->wm[level].lines = wm->wm[level - 1].lines;
1641 				wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
1642 			}
1643 		}
1644 	}
1645 
1646 	/*
1647 	 * Go back and disable the transition and SAGV watermarks
1648 	 * if it turns out we don't have enough DDB blocks for them.
1649 	 */
1650 	for_each_plane_id_on_crtc(crtc, plane_id) {
1651 		const struct skl_ddb_entry *ddb =
1652 			&crtc_state->wm.skl.plane_ddb[plane_id];
1653 		const struct skl_ddb_entry *ddb_y =
1654 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
1655 		struct skl_plane_wm *wm =
1656 			&crtc_state->wm.skl.optimal.planes[plane_id];
1657 
1658 		if (DISPLAY_VER(i915) < 11 &&
1659 		    crtc_state->nv12_planes & BIT(plane_id)) {
1660 			skl_check_wm_level(&wm->trans_wm, ddb_y);
1661 		} else {
1662 			WARN_ON(skl_ddb_entry_size(ddb_y));
1663 
1664 			skl_check_wm_level(&wm->trans_wm, ddb);
1665 		}
1666 
1667 		skl_check_wm_level(&wm->sagv.wm0, ddb);
1668 		skl_check_wm_level(&wm->sagv.trans_wm, ddb);
1669 	}
1670 
1671 	return 0;
1672 }
1673 
1674 /*
1675  * The max latency should be 257 (max the punit can code is 255 and we add 2us
1676  * for the read latency) and cpp should always be <= 8, so that
1677  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
1678  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
1679  */
1680 static uint_fixed_16_16_t
1681 skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
1682 	       u8 cpp, u32 latency, u32 dbuf_block_size)
1683 {
1684 	u32 wm_intermediate_val;
1685 	uint_fixed_16_16_t ret;
1686 
1687 	if (latency == 0)
1688 		return FP_16_16_MAX;
1689 
1690 	wm_intermediate_val = latency * pixel_rate * cpp;
1691 	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
1692 
1693 	if (DISPLAY_VER(i915) >= 10)
1694 		ret = add_fixed16_u32(ret, 1);
1695 
1696 	return ret;
1697 }
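
/*
 * Editorial worked example (illustrative, assuming pixel_rate is in kHz as
 * elsewhere in this file): with latency = 10us, pixel_rate = 148500
 * (148.5 MHz), cpp = 4 and a 512 byte block, method 1 gives
 * 10 * 148500 * 4 / (1000 * 512) ~= 11.6 blocks, plus one extra block on
 * DISPLAY_VER >= 10.
 */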
1698 
1699 static uint_fixed_16_16_t
1700 skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
1701 	       uint_fixed_16_16_t plane_blocks_per_line)
1702 {
1703 	u32 wm_intermediate_val;
1704 	uint_fixed_16_16_t ret;
1705 
1706 	if (latency == 0)
1707 		return FP_16_16_MAX;
1708 
1709 	wm_intermediate_val = latency * pixel_rate;
1710 	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
1711 					   pipe_htotal * 1000);
1712 	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
1713 	return ret;
1714 }
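
/*
 * Illustrative example (hypothetical numbers, not from Bspec): with
 * pixel_rate = 148500 kHz, cpp = 4, latency = 10 us and
 * dbuf_block_size = 512, method 1 gives
 * 10 * 148500 * 4 / (1000 * 512) ~= 11.6 blocks (plus 1 on display
 * version 10+). With pipe_htotal = 2200 and plane_blocks_per_line = 31,
 * method 2 gives DIV_ROUND_UP(10 * 148500, 2200 * 1000) = 1 line,
 * i.e. 31 blocks.
 */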
1715 
1716 static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state * crtc_state)1717 intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
1718 {
1719 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1720 	u32 pixel_rate;
1721 	u32 crtc_htotal;
1722 	uint_fixed_16_16_t linetime_us;
1723 
1724 	if (!crtc_state->hw.active)
1725 		return u32_to_fixed16(0);
1726 
1727 	pixel_rate = crtc_state->pixel_rate;
1728 
1729 	if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
1730 		return u32_to_fixed16(0);
1731 
1732 	crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
1733 	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
1734 
1735 	return linetime_us;
1736 }
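
/*
 * Illustrative example (hypothetical numbers): crtc_htotal = 2200 and
 * pixel_rate = 148500 kHz give a line time of 2200 * 1000 / 148500
 * ~= 14.8 us.
 */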
1737 
1738 static int
skl_compute_wm_params(const struct intel_crtc_state * crtc_state,int width,const struct drm_format_info * format,u64 modifier,unsigned int rotation,u32 plane_pixel_rate,struct skl_wm_params * wp,int color_plane)1739 skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
1740 		      int width, const struct drm_format_info *format,
1741 		      u64 modifier, unsigned int rotation,
1742 		      u32 plane_pixel_rate, struct skl_wm_params *wp,
1743 		      int color_plane)
1744 {
1745 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1746 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1747 	u32 interm_pbpl;
1748 
1749 	/* only planar formats have two planes */
1750 	if (color_plane == 1 &&
1751 	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
1752 		drm_dbg_kms(&i915->drm,
1753 			    "Non planar format have single plane\n");
1754 		return -EINVAL;
1755 	}
1756 
1757 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
1758 	wp->y_tiled = modifier != I915_FORMAT_MOD_X_TILED &&
1759 		intel_fb_is_tiled_modifier(modifier);
1760 	wp->rc_surface = intel_fb_is_ccs_modifier(modifier);
1761 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
1762 
1763 	wp->width = width;
1764 	if (color_plane == 1 && wp->is_planar)
1765 		wp->width /= 2;
1766 
1767 	wp->cpp = format->cpp[color_plane];
1768 	wp->plane_pixel_rate = plane_pixel_rate;
1769 
1770 	if (DISPLAY_VER(i915) >= 11 &&
1771 	    modifier == I915_FORMAT_MOD_Yf_TILED  && wp->cpp == 1)
1772 		wp->dbuf_block_size = 256;
1773 	else
1774 		wp->dbuf_block_size = 512;
1775 
1776 	if (drm_rotation_90_or_270(rotation)) {
1777 		switch (wp->cpp) {
1778 		case 1:
1779 			wp->y_min_scanlines = 16;
1780 			break;
1781 		case 2:
1782 			wp->y_min_scanlines = 8;
1783 			break;
1784 		case 4:
1785 			wp->y_min_scanlines = 4;
1786 			break;
1787 		default:
1788 			MISSING_CASE(wp->cpp);
1789 			return -EINVAL;
1790 		}
1791 	} else {
1792 		wp->y_min_scanlines = 4;
1793 	}
1794 
1795 	if (skl_needs_memory_bw_wa(i915))
1796 		wp->y_min_scanlines *= 2;
1797 
1798 	wp->plane_bytes_per_line = wp->width * wp->cpp;
1799 	if (wp->y_tiled) {
1800 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
1801 					   wp->y_min_scanlines,
1802 					   wp->dbuf_block_size);
1803 
1804 		if (DISPLAY_VER(i915) >= 10)
1805 			interm_pbpl++;
1806 
1807 		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
1808 							wp->y_min_scanlines);
1809 	} else {
1810 		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
1811 					   wp->dbuf_block_size);
1812 
1813 		if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
1814 			interm_pbpl++;
1815 
1816 		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
1817 	}
1818 
1819 	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
1820 					     wp->plane_blocks_per_line);
1821 
1822 	wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
1823 
1824 	return 0;
1825 }
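
/*
 * Illustrative example (hypothetical numbers): a 3840 pixel wide, 4 Bpp,
 * X-tiled plane with a 512 byte dbuf block size has
 * plane_bytes_per_line = 3840 * 4 = 15360, so
 * plane_blocks_per_line = DIV_ROUND_UP(15360, 512) + 1 = 31 on display
 * version 10+ (the +1 is skipped for X-tiled on version 9).
 */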
1826 
1827 static int
skl_compute_plane_wm_params(const struct intel_crtc_state * crtc_state,const struct intel_plane_state * plane_state,struct skl_wm_params * wp,int color_plane)1828 skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
1829 			    const struct intel_plane_state *plane_state,
1830 			    struct skl_wm_params *wp, int color_plane)
1831 {
1832 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1833 	int width;
1834 
1835 	/*
1836 	 * Src coordinates are already rotated by 270 degrees for
1837 	 * the 90/270 degree plane rotation cases (to match the
1838 	 * GTT mapping), hence no need to account for rotation here.
1839 	 */
1840 	width = drm_rect_width(&plane_state->uapi.src) >> 16;
1841 
1842 	return skl_compute_wm_params(crtc_state, width,
1843 				     fb->format, fb->modifier,
1844 				     plane_state->hw.rotation,
1845 				     intel_plane_pixel_rate(crtc_state, plane_state),
1846 				     wp, color_plane);
1847 }
1848 
skl_wm_has_lines(struct drm_i915_private * i915,int level)1849 static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
1850 {
1851 	if (DISPLAY_VER(i915) >= 10)
1852 		return true;
1853 
1854 	/* The number of lines is ignored for the level 0 watermark. */
1855 	return level > 0;
1856 }
1857 
skl_wm_max_lines(struct drm_i915_private * i915)1858 static int skl_wm_max_lines(struct drm_i915_private *i915)
1859 {
1860 	if (DISPLAY_VER(i915) >= 13)
1861 		return 255;
1862 	else
1863 		return 31;
1864 }
1865 
skl_compute_plane_wm(const struct intel_crtc_state * crtc_state,struct intel_plane * plane,int level,unsigned int latency,const struct skl_wm_params * wp,const struct skl_wm_level * result_prev,struct skl_wm_level * result)1866 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
1867 				 struct intel_plane *plane,
1868 				 int level,
1869 				 unsigned int latency,
1870 				 const struct skl_wm_params *wp,
1871 				 const struct skl_wm_level *result_prev,
1872 				 struct skl_wm_level *result /* out */)
1873 {
1874 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1875 	uint_fixed_16_16_t method1, method2;
1876 	uint_fixed_16_16_t selected_result;
1877 	u32 blocks, lines, min_ddb_alloc = 0;
1878 
1879 	if (latency == 0 ||
1880 	    (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
1881 		/* reject it */
1882 		result->min_ddb_alloc = U16_MAX;
1883 		return;
1884 	}
1885 
1886 	method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
1887 				 wp->cpp, latency, wp->dbuf_block_size);
1888 	method2 = skl_wm_method2(wp->plane_pixel_rate,
1889 				 crtc_state->hw.pipe_mode.crtc_htotal,
1890 				 latency,
1891 				 wp->plane_blocks_per_line);
1892 
1893 	if (wp->y_tiled) {
1894 		selected_result = max_fixed16(method2, wp->y_tile_minimum);
1895 	} else {
1896 		if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
1897 		     wp->dbuf_block_size < 1) &&
1898 		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
1899 			selected_result = method2;
1900 		} else if (latency >= wp->linetime_us) {
1901 			if (DISPLAY_VER(i915) == 9)
1902 				selected_result = min_fixed16(method1, method2);
1903 			else
1904 				selected_result = method2;
1905 		} else {
1906 			selected_result = method1;
1907 		}
1908 	}
1909 
1910 	blocks = fixed16_to_u32_round_up(selected_result) + 1;
1911 	/*
1912 	 * Let's have blocks at minimum equivalent to plane_blocks_per_line,
1913 	 * as there will be at least one line in the lines configuration. This
1914 	 * is a workaround for FIFO underruns observed with resolutions like
1915 	 * 4k 60 Hz in single channel DRAM configurations.
1916 	 *
1917 	 * As per Bspec 49325, if the ddb allocation can hold at least
1918 	 * one plane_blocks_per_line, we should have selected method2 in
1919 	 * the above logic. Assuming that modern versions have enough dbuf
1920 	 * and that method2 guarantees blocks equivalent to at least 1 line,
1921 	 * select the blocks as plane_blocks_per_line.
1922 	 *
1923 	 * TODO: Revisit the logic when we have a better understanding of the
1924 	 * DRAM channels' impact on the level 0 memory latency and the
1925 	 * relevant wm calculations.
1926 	 */
1927 	if (skl_wm_has_lines(i915, level))
1928 		blocks = max(blocks,
1929 			     fixed16_to_u32_round_up(wp->plane_blocks_per_line));
1930 	lines = div_round_up_fixed16(selected_result,
1931 				     wp->plane_blocks_per_line);
1932 
1933 	if (DISPLAY_VER(i915) == 9) {
1934 		/* Display WA #1125: skl,bxt,kbl */
1935 		if (level == 0 && wp->rc_surface)
1936 			blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1937 
1938 		/* Display WA #1126: skl,bxt,kbl */
1939 		if (level >= 1 && level <= 7) {
1940 			if (wp->y_tiled) {
1941 				blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
1942 				lines += wp->y_min_scanlines;
1943 			} else {
1944 				blocks++;
1945 			}
1946 
1947 			/*
1948 			 * Make sure the result blocks for higher latency levels are
1949 			 * at least as high as the level below the current level.
1950 			 * This is an assumption in the DDB algorithm optimization
1951 			 * for special cases. Also covers Display WA #1125 for RC.
1952 			 */
1953 			if (result_prev->blocks > blocks)
1954 				blocks = result_prev->blocks;
1955 		}
1956 	}
1957 
1958 	if (DISPLAY_VER(i915) >= 11) {
1959 		if (wp->y_tiled) {
1960 			int extra_lines;
1961 
1962 			if (lines % wp->y_min_scanlines == 0)
1963 				extra_lines = wp->y_min_scanlines;
1964 			else
1965 				extra_lines = wp->y_min_scanlines * 2 -
1966 					lines % wp->y_min_scanlines;
1967 
1968 			min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
1969 								 wp->plane_blocks_per_line);
1970 		} else {
1971 			min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
1972 		}
1973 	}
1974 
1975 	if (!skl_wm_has_lines(i915, level))
1976 		lines = 0;
1977 
1978 	if (lines > skl_wm_max_lines(i915)) {
1979 		/* reject it */
1980 		result->min_ddb_alloc = U16_MAX;
1981 		return;
1982 	}
1983 
1984 	/*
1985 	 * If lines is valid, assume we can use this watermark level
1986 	 * for now.  We'll come back and disable it after we calculate the
1987 	 * DDB allocation if it turns out we don't actually have enough
1988 	 * blocks to satisfy it.
1989 	 */
1990 	result->blocks = blocks;
1991 	result->lines = lines;
1992 	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
1993 	result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
1994 	result->enable = true;
1995 
1996 	if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
1997 		result->can_sagv = latency >= i915->display.sagv.block_time_us;
1998 }
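
/*
 * Illustrative example (hypothetical numbers, display version 11+,
 * non Y-tiled): with blocks = 40 the code above yields
 * min_ddb_alloc = 40 + DIV_ROUND_UP(40, 10) = 44, and the reported
 * result->min_ddb_alloc = max(44, 40) + 1 = 45, to satisfy the
 * "value >= plane ddb allocation -> invalid" rule.
 */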
1999 
2000 static void
skl_compute_wm_levels(const struct intel_crtc_state * crtc_state,struct intel_plane * plane,const struct skl_wm_params * wm_params,struct skl_wm_level * levels)2001 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
2002 		      struct intel_plane *plane,
2003 		      const struct skl_wm_params *wm_params,
2004 		      struct skl_wm_level *levels)
2005 {
2006 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2007 	struct skl_wm_level *result_prev = &levels[0];
2008 	int level;
2009 
2010 	for (level = 0; level < i915->display.wm.num_levels; level++) {
2011 		struct skl_wm_level *result = &levels[level];
2012 		unsigned int latency = skl_wm_latency(i915, level, wm_params);
2013 
2014 		skl_compute_plane_wm(crtc_state, plane, level, latency,
2015 				     wm_params, result_prev, result);
2016 
2017 		result_prev = result;
2018 	}
2019 }
2020 
tgl_compute_sagv_wm(const struct intel_crtc_state * crtc_state,struct intel_plane * plane,const struct skl_wm_params * wm_params,struct skl_plane_wm * plane_wm)2021 static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
2022 				struct intel_plane *plane,
2023 				const struct skl_wm_params *wm_params,
2024 				struct skl_plane_wm *plane_wm)
2025 {
2026 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2027 	struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
2028 	struct skl_wm_level *levels = plane_wm->wm;
2029 	unsigned int latency = 0;
2030 
2031 	if (i915->display.sagv.block_time_us)
2032 		latency = i915->display.sagv.block_time_us +
2033 			skl_wm_latency(i915, 0, wm_params);
2034 
2035 	skl_compute_plane_wm(crtc_state, plane, 0, latency,
2036 			     wm_params, &levels[0],
2037 			     sagv_wm);
2038 }
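
/*
 * Note: the SAGV wm0 above is simply the regular level 0 watermark
 * recomputed with the level 0 latency bumped by the SAGV block time,
 * e.g. (hypothetically) a 20 us block time plus a 5 us level 0 latency
 * gives a 25 us effective latency.
 */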
2039 
skl_compute_transition_wm(struct drm_i915_private * i915,struct skl_wm_level * trans_wm,const struct skl_wm_level * wm0,const struct skl_wm_params * wp)2040 static void skl_compute_transition_wm(struct drm_i915_private *i915,
2041 				      struct skl_wm_level *trans_wm,
2042 				      const struct skl_wm_level *wm0,
2043 				      const struct skl_wm_params *wp)
2044 {
2045 	u16 trans_min, trans_amount, trans_y_tile_min;
2046 	u16 wm0_blocks, trans_offset, blocks;
2047 
2048 	/* Transition WMs don't make any sense if IPC is disabled */
2049 	if (!skl_watermark_ipc_enabled(i915))
2050 		return;
2051 
2052 	/*
2053 	 * WaDisableTWM:skl,kbl,cfl,bxt
2054 	 * Transition WMs are not recommended by the HW team for GEN9
2055 	 */
2056 	if (DISPLAY_VER(i915) == 9)
2057 		return;
2058 
2059 	if (DISPLAY_VER(i915) >= 11)
2060 		trans_min = 4;
2061 	else
2062 		trans_min = 14;
2063 
2064 	/* Display WA #1140: glk,cnl */
2065 	if (DISPLAY_VER(i915) == 10)
2066 		trans_amount = 0;
2067 	else
2068 		trans_amount = 10; /* This is a configurable amount */
2069 
2070 	trans_offset = trans_min + trans_amount;
2071 
2072 	/*
2073 	 * The spec asks for Selected Result Blocks for wm0 (the real value),
2074 	 * not Result Blocks (the integer value). Pay attention to the capital
2075 	 * letters. The value wm0->blocks is actually Result Blocks, but
2076 	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
2077 	 * and since we will later have to take the ceiling of the sum in the
2078 	 * transition watermark calculation, we can just pretend Selected
2079 	 * Result Blocks is Result Blocks minus 1 and it should work for the
2080 	 * current platforms.
2081 	 */
2082 	wm0_blocks = wm0->blocks - 1;
2083 
2084 	if (wp->y_tiled) {
2085 		trans_y_tile_min =
2086 			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
2087 		blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
2088 	} else {
2089 		blocks = wm0_blocks + trans_offset;
2090 	}
2091 	blocks++;
2092 
2093 	/*
2094 	 * Just assume we can enable the transition watermark.  After
2095 	 * computing the DDB we'll come back and disable it if that
2096 	 * assumption turns out to be false.
2097 	 */
2098 	trans_wm->blocks = blocks;
2099 	trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
2100 	trans_wm->enable = true;
2101 }
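
/*
 * Illustrative example (hypothetical numbers, display version 11+,
 * IPC enabled, not Y-tiled): trans_offset = 4 + 10 = 14, and with
 * wm0->blocks = 20 the result is blocks = (20 - 1) + 14 + 1 = 34,
 * with trans_wm->min_ddb_alloc = max(wm0->min_ddb_alloc, 35).
 */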
2102 
skl_build_plane_wm_single(struct intel_crtc_state * crtc_state,const struct intel_plane_state * plane_state,struct intel_plane * plane,int color_plane)2103 static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
2104 				     const struct intel_plane_state *plane_state,
2105 				     struct intel_plane *plane, int color_plane)
2106 {
2107 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2108 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2109 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2110 	struct skl_wm_params wm_params;
2111 	int ret;
2112 
2113 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2114 					  &wm_params, color_plane);
2115 	if (ret)
2116 		return ret;
2117 
2118 	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
2119 
2120 	skl_compute_transition_wm(i915, &wm->trans_wm,
2121 				  &wm->wm[0], &wm_params);
2122 
2123 	if (DISPLAY_VER(i915) >= 12) {
2124 		tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
2125 
2126 		skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
2127 					  &wm->sagv.wm0, &wm_params);
2128 	}
2129 
2130 	return 0;
2131 }
2132 
skl_build_plane_wm_uv(struct intel_crtc_state * crtc_state,const struct intel_plane_state * plane_state,struct intel_plane * plane)2133 static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
2134 				 const struct intel_plane_state *plane_state,
2135 				 struct intel_plane *plane)
2136 {
2137 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
2138 	struct skl_wm_params wm_params;
2139 	int ret;
2140 
2141 	wm->is_planar = true;
2142 
2143 	/* uv plane watermarks must also be validated for NV12/Planar */
2144 	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
2145 					  &wm_params, 1);
2146 	if (ret)
2147 		return ret;
2148 
2149 	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
2150 
2151 	return 0;
2152 }
2153 
skl_build_plane_wm(struct intel_crtc_state * crtc_state,const struct intel_plane_state * plane_state)2154 static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
2155 			      const struct intel_plane_state *plane_state)
2156 {
2157 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2158 	enum plane_id plane_id = plane->id;
2159 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2160 	const struct drm_framebuffer *fb = plane_state->hw.fb;
2161 	int ret;
2162 
2163 	memset(wm, 0, sizeof(*wm));
2164 
2165 	if (!intel_wm_plane_visible(crtc_state, plane_state))
2166 		return 0;
2167 
2168 	ret = skl_build_plane_wm_single(crtc_state, plane_state,
2169 					plane, 0);
2170 	if (ret)
2171 		return ret;
2172 
2173 	if (fb->format->is_yuv && fb->format->num_planes > 1) {
2174 		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
2175 					    plane);
2176 		if (ret)
2177 			return ret;
2178 	}
2179 
2180 	return 0;
2181 }
2182 
icl_build_plane_wm(struct intel_crtc_state * crtc_state,const struct intel_plane_state * plane_state)2183 static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
2184 			      const struct intel_plane_state *plane_state)
2185 {
2186 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2187 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2188 	enum plane_id plane_id = plane->id;
2189 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
2190 	int ret;
2191 
2192 	/* Watermarks calculated in master */
2193 	if (plane_state->planar_slave)
2194 		return 0;
2195 
2196 	memset(wm, 0, sizeof(*wm));
2197 
2198 	if (plane_state->planar_linked_plane) {
2199 		const struct drm_framebuffer *fb = plane_state->hw.fb;
2200 
2201 		drm_WARN_ON(&i915->drm,
2202 			    !intel_wm_plane_visible(crtc_state, plane_state));
2203 		drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
2204 			    fb->format->num_planes == 1);
2205 
2206 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2207 						plane_state->planar_linked_plane, 0);
2208 		if (ret)
2209 			return ret;
2210 
2211 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2212 						plane, 1);
2213 		if (ret)
2214 			return ret;
2215 	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
2216 		ret = skl_build_plane_wm_single(crtc_state, plane_state,
2217 						plane, 0);
2218 		if (ret)
2219 			return ret;
2220 	}
2221 
2222 	return 0;
2223 }
2224 
2225 static bool
skl_is_vblank_too_short(const struct intel_crtc_state * crtc_state,int wm0_lines,int latency)2226 skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
2227 			int wm0_lines, int latency)
2228 {
2229 	const struct drm_display_mode *adjusted_mode =
2230 		&crtc_state->hw.adjusted_mode;
2231 
2232 	/* FIXME missing scaler and DSC pre-fill time */
2233 	return crtc_state->framestart_delay +
2234 		intel_usecs_to_scanlines(adjusted_mode, latency) +
2235 		wm0_lines >
2236 		adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
2237 }
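
/*
 * Illustrative example (hypothetical numbers): with framestart_delay = 1,
 * a latency that spans 3 scanlines and wm0_lines = 4, the watermark level
 * is usable only if the vblank length
 * (crtc_vtotal - crtc_vblank_start) is at least 1 + 3 + 4 = 8 scanlines;
 * otherwise the level is considered too slow for this mode.
 */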
2238 
skl_max_wm0_lines(const struct intel_crtc_state * crtc_state)2239 static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
2240 {
2241 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2242 	enum plane_id plane_id;
2243 	int wm0_lines = 0;
2244 
2245 	for_each_plane_id_on_crtc(crtc, plane_id) {
2246 		const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
2247 
2248 		/* FIXME what about !skl_wm_has_lines() platforms? */
2249 		wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines);
2250 	}
2251 
2252 	return wm0_lines;
2253 }
2254 
skl_max_wm_level_for_vblank(struct intel_crtc_state * crtc_state,int wm0_lines)2255 static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
2256 				       int wm0_lines)
2257 {
2258 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2259 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2260 	int level;
2261 
2262 	for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
2263 		int latency;
2264 
2265 		/* FIXME should we care about the latency w/a's? */
2266 		latency = skl_wm_latency(i915, level, NULL);
2267 		if (latency == 0)
2268 			continue;
2269 
2270 		/* FIXME is it correct to use 0 latency for wm0 here? */
2271 		if (level == 0)
2272 			latency = 0;
2273 
2274 		if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency))
2275 			return level;
2276 	}
2277 
2278 	return -EINVAL;
2279 }
2280 
skl_wm_check_vblank(struct intel_crtc_state * crtc_state)2281 static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
2282 {
2283 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2284 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2285 	int wm0_lines, level;
2286 
2287 	if (!crtc_state->hw.active)
2288 		return 0;
2289 
2290 	wm0_lines = skl_max_wm0_lines(crtc_state);
2291 
2292 	level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines);
2293 	if (level < 0)
2294 		return level;
2295 
2296 	/*
2297 	 * PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
2298 	 * based on whether we're limited by the vblank duration.
2299 	 */
2300 	crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1;
2301 
2302 	for (level++; level < i915->display.wm.num_levels; level++) {
2303 		enum plane_id plane_id;
2304 
2305 		for_each_plane_id_on_crtc(crtc, plane_id) {
2306 			struct skl_plane_wm *wm =
2307 				&crtc_state->wm.skl.optimal.planes[plane_id];
2308 
2309 			/*
2310 			 * FIXME just clear enable or flag the entire
2311 			 * thing as bad via min_ddb_alloc=U16_MAX?
2312 			 */
2313 			wm->wm[level].enable = false;
2314 			wm->uv_wm[level].enable = false;
2315 		}
2316 	}
2317 
2318 	if (DISPLAY_VER(i915) >= 12 &&
2319 	    i915->display.sagv.block_time_us &&
2320 	    skl_is_vblank_too_short(crtc_state, wm0_lines,
2321 				    i915->display.sagv.block_time_us)) {
2322 		enum plane_id plane_id;
2323 
2324 		for_each_plane_id_on_crtc(crtc, plane_id) {
2325 			struct skl_plane_wm *wm =
2326 				&crtc_state->wm.skl.optimal.planes[plane_id];
2327 
2328 			wm->sagv.wm0.enable = false;
2329 			wm->sagv.trans_wm.enable = false;
2330 		}
2331 	}
2332 
2333 	return 0;
2334 }
2335 
skl_build_pipe_wm(struct intel_atomic_state * state,struct intel_crtc * crtc)2336 static int skl_build_pipe_wm(struct intel_atomic_state *state,
2337 			     struct intel_crtc *crtc)
2338 {
2339 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2340 	struct intel_crtc_state *crtc_state =
2341 		intel_atomic_get_new_crtc_state(state, crtc);
2342 	const struct intel_plane_state *plane_state;
2343 	struct intel_plane *plane;
2344 	int ret, i;
2345 
2346 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2347 		/*
2348 		 * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
2349 		 * instead but we don't populate that correctly for NV12 Y
2350 		 * planes so for now hack this.
2351 		 */
2352 		if (plane->pipe != crtc->pipe)
2353 			continue;
2354 
2355 		if (DISPLAY_VER(i915) >= 11)
2356 			ret = icl_build_plane_wm(crtc_state, plane_state);
2357 		else
2358 			ret = skl_build_plane_wm(crtc_state, plane_state);
2359 		if (ret)
2360 			return ret;
2361 	}
2362 
2363 	crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
2364 
2365 	return skl_wm_check_vblank(crtc_state);
2366 }
2367 
skl_wm_level_equals(const struct skl_wm_level * l1,const struct skl_wm_level * l2)2368 static bool skl_wm_level_equals(const struct skl_wm_level *l1,
2369 				const struct skl_wm_level *l2)
2370 {
2371 	return l1->enable == l2->enable &&
2372 		l1->ignore_lines == l2->ignore_lines &&
2373 		l1->lines == l2->lines &&
2374 		l1->blocks == l2->blocks;
2375 }
2376 
skl_plane_wm_equals(struct drm_i915_private * i915,const struct skl_plane_wm * wm1,const struct skl_plane_wm * wm2)2377 static bool skl_plane_wm_equals(struct drm_i915_private *i915,
2378 				const struct skl_plane_wm *wm1,
2379 				const struct skl_plane_wm *wm2)
2380 {
2381 	int level;
2382 
2383 	for (level = 0; level < i915->display.wm.num_levels; level++) {
2384 		/*
2385 		 * We don't check uv_wm as the hardware doesn't actually
2386 		 * use it. It only gets used for calculating the required
2387 		 * ddb allocation.
2388 		 */
2389 		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
2390 			return false;
2391 	}
2392 
2393 	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
2394 		skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
2395 		skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
2396 }
2397 
skl_ddb_entries_overlap(const struct skl_ddb_entry * a,const struct skl_ddb_entry * b)2398 static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
2399 				    const struct skl_ddb_entry *b)
2400 {
2401 	return a->start < b->end && b->start < a->end;
2402 }
2403 
skl_ddb_entry_union(struct skl_ddb_entry * a,const struct skl_ddb_entry * b)2404 static void skl_ddb_entry_union(struct skl_ddb_entry *a,
2405 				const struct skl_ddb_entry *b)
2406 {
2407 	if (a->end && b->end) {
2408 		a->start = min(a->start, b->start);
2409 		a->end = max(a->end, b->end);
2410 	} else if (b->end) {
2411 		a->start = b->start;
2412 		a->end = b->end;
2413 	}
2414 }
2415 
skl_ddb_allocation_overlaps(const struct skl_ddb_entry * ddb,const struct skl_ddb_entry * entries,int num_entries,int ignore_idx)2416 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
2417 				 const struct skl_ddb_entry *entries,
2418 				 int num_entries, int ignore_idx)
2419 {
2420 	int i;
2421 
2422 	for (i = 0; i < num_entries; i++) {
2423 		if (i != ignore_idx &&
2424 		    skl_ddb_entries_overlap(ddb, &entries[i]))
2425 			return true;
2426 	}
2427 
2428 	return false;
2429 }
2430 
2431 static int
skl_ddb_add_affected_planes(struct intel_atomic_state * state,struct intel_crtc * crtc)2432 skl_ddb_add_affected_planes(struct intel_atomic_state *state,
2433 			    struct intel_crtc *crtc)
2434 {
2435 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2436 	const struct intel_crtc_state *old_crtc_state =
2437 		intel_atomic_get_old_crtc_state(state, crtc);
2438 	struct intel_crtc_state *new_crtc_state =
2439 		intel_atomic_get_new_crtc_state(state, crtc);
2440 	struct intel_plane *plane;
2441 
2442 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2443 		struct intel_plane_state *plane_state;
2444 		enum plane_id plane_id = plane->id;
2445 
2446 		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
2447 					&new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
2448 		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
2449 					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
2450 			continue;
2451 
2452 		if (new_crtc_state->do_async_flip) {
2453 			drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
2454 				    plane->base.base.id, plane->base.name);
2455 			return -EINVAL;
2456 		}
2457 
2458 		plane_state = intel_atomic_get_plane_state(state, plane);
2459 		if (IS_ERR(plane_state))
2460 			return PTR_ERR(plane_state);
2461 
2462 		new_crtc_state->update_planes |= BIT(plane_id);
2463 		new_crtc_state->async_flip_planes = 0;
2464 		new_crtc_state->do_async_flip = false;
2465 	}
2466 
2467 	return 0;
2468 }
2469 
intel_dbuf_enabled_slices(const struct intel_dbuf_state * dbuf_state)2470 static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
2471 {
2472 	struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
2473 	u8 enabled_slices;
2474 	enum pipe pipe;
2475 
2476 	/*
2477 	 * FIXME: For now we always enable slice S1 as per
2478 	 * the Bspec display initialization sequence.
2479 	 */
2480 	enabled_slices = BIT(DBUF_S1);
2481 
2482 	for_each_pipe(i915, pipe)
2483 		enabled_slices |= dbuf_state->slices[pipe];
2484 
2485 	return enabled_slices;
2486 }
2487 
2488 static int
skl_compute_ddb(struct intel_atomic_state * state)2489 skl_compute_ddb(struct intel_atomic_state *state)
2490 {
2491 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2492 	const struct intel_dbuf_state *old_dbuf_state;
2493 	struct intel_dbuf_state *new_dbuf_state = NULL;
2494 	struct intel_crtc_state *new_crtc_state;
2495 	struct intel_crtc *crtc;
2496 	int ret, i;
2497 
2498 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2499 		new_dbuf_state = intel_atomic_get_dbuf_state(state);
2500 		if (IS_ERR(new_dbuf_state))
2501 			return PTR_ERR(new_dbuf_state);
2502 
2503 		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
2504 		break;
2505 	}
2506 
2507 	if (!new_dbuf_state)
2508 		return 0;
2509 
2510 	new_dbuf_state->active_pipes =
2511 		intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
2512 
2513 	if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
2514 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2515 		if (ret)
2516 			return ret;
2517 	}
2518 
2519 	if (HAS_MBUS_JOINING(i915)) {
2520 		new_dbuf_state->joined_mbus =
2521 			adlp_check_mbus_joined(new_dbuf_state->active_pipes);
2522 
2523 		if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2524 			ret = intel_cdclk_state_set_joined_mbus(state, new_dbuf_state->joined_mbus);
2525 			if (ret)
2526 				return ret;
2527 		}
2528 	}
2529 
2530 	for_each_intel_crtc(&i915->drm, crtc) {
2531 		enum pipe pipe = crtc->pipe;
2532 
2533 		new_dbuf_state->slices[pipe] =
2534 			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
2535 						new_dbuf_state->joined_mbus);
2536 
2537 		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
2538 			continue;
2539 
2540 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2541 		if (ret)
2542 			return ret;
2543 	}
2544 
2545 	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
2546 
2547 	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
2548 	    old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
2549 		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
2550 		if (ret)
2551 			return ret;
2552 
2553 		drm_dbg_kms(&i915->drm,
2554 			    "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
2555 			    old_dbuf_state->enabled_slices,
2556 			    new_dbuf_state->enabled_slices,
2557 			    DISPLAY_INFO(i915)->dbuf.slice_mask,
2558 			    str_yes_no(old_dbuf_state->joined_mbus),
2559 			    str_yes_no(new_dbuf_state->joined_mbus));
2560 	}
2561 
2562 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2563 		enum pipe pipe = crtc->pipe;
2564 
2565 		new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
2566 
2567 		if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
2568 			continue;
2569 
2570 		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
2571 		if (ret)
2572 			return ret;
2573 	}
2574 
2575 	for_each_intel_crtc(&i915->drm, crtc) {
2576 		ret = skl_crtc_allocate_ddb(state, crtc);
2577 		if (ret)
2578 			return ret;
2579 	}
2580 
2581 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2582 		ret = skl_crtc_allocate_plane_ddb(state, crtc);
2583 		if (ret)
2584 			return ret;
2585 
2586 		ret = skl_ddb_add_affected_planes(state, crtc);
2587 		if (ret)
2588 			return ret;
2589 	}
2590 
2591 	return 0;
2592 }
2593 
enast(bool enable)2594 static char enast(bool enable)
2595 {
2596 	return enable ? '*' : ' ';
2597 }
2598 
2599 static void
skl_print_wm_changes(struct intel_atomic_state * state)2600 skl_print_wm_changes(struct intel_atomic_state *state)
2601 {
2602 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2603 	const struct intel_crtc_state *old_crtc_state;
2604 	const struct intel_crtc_state *new_crtc_state;
2605 	struct intel_plane *plane;
2606 	struct intel_crtc *crtc;
2607 	int i;
2608 
2609 	if (!drm_debug_enabled(DRM_UT_KMS))
2610 		return;
2611 
2612 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
2613 					    new_crtc_state, i) {
2614 		const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
2615 
2616 		old_pipe_wm = &old_crtc_state->wm.skl.optimal;
2617 		new_pipe_wm = &new_crtc_state->wm.skl.optimal;
2618 
2619 		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2620 			enum plane_id plane_id = plane->id;
2621 			const struct skl_ddb_entry *old, *new;
2622 
2623 			old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
2624 			new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
2625 
2626 			if (skl_ddb_entry_equal(old, new))
2627 				continue;
2628 
2629 			drm_dbg_kms(&i915->drm,
2630 				    "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
2631 				    plane->base.base.id, plane->base.name,
2632 				    old->start, old->end, new->start, new->end,
2633 				    skl_ddb_entry_size(old), skl_ddb_entry_size(new));
2634 		}
2635 
2636 		for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2637 			enum plane_id plane_id = plane->id;
2638 			const struct skl_plane_wm *old_wm, *new_wm;
2639 
2640 			old_wm = &old_pipe_wm->planes[plane_id];
2641 			new_wm = &new_pipe_wm->planes[plane_id];
2642 
2643 			if (skl_plane_wm_equals(i915, old_wm, new_wm))
2644 				continue;
2645 
2646 			drm_dbg_kms(&i915->drm,
2647 				    "[PLANE:%d:%s]   level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
2648 				    " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
2649 				    plane->base.base.id, plane->base.name,
2650 				    enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
2651 				    enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
2652 				    enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
2653 				    enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
2654 				    enast(old_wm->trans_wm.enable),
2655 				    enast(old_wm->sagv.wm0.enable),
2656 				    enast(old_wm->sagv.trans_wm.enable),
2657 				    enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
2658 				    enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
2659 				    enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
2660 				    enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
2661 				    enast(new_wm->trans_wm.enable),
2662 				    enast(new_wm->sagv.wm0.enable),
2663 				    enast(new_wm->sagv.trans_wm.enable));
2664 
2665 			drm_dbg_kms(&i915->drm,
2666 				    "[PLANE:%d:%s]   lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
2667 				      " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
2668 				    plane->base.base.id, plane->base.name,
2669 				    enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
2670 				    enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
2671 				    enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
2672 				    enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
2673 				    enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
2674 				    enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
2675 				    enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
2676 				    enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
2677 				    enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
2678 				    enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
2679 				    enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
2680 				    enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
2681 				    enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
2682 				    enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
2683 				    enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
2684 				    enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
2685 				    enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
2686 				    enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
2687 				    enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
2688 				    enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
2689 				    enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
2690 				    enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
2691 
2692 			drm_dbg_kms(&i915->drm,
2693 				    "[PLANE:%d:%s]  blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2694 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2695 				    plane->base.base.id, plane->base.name,
2696 				    old_wm->wm[0].blocks, old_wm->wm[1].blocks,
2697 				    old_wm->wm[2].blocks, old_wm->wm[3].blocks,
2698 				    old_wm->wm[4].blocks, old_wm->wm[5].blocks,
2699 				    old_wm->wm[6].blocks, old_wm->wm[7].blocks,
2700 				    old_wm->trans_wm.blocks,
2701 				    old_wm->sagv.wm0.blocks,
2702 				    old_wm->sagv.trans_wm.blocks,
2703 				    new_wm->wm[0].blocks, new_wm->wm[1].blocks,
2704 				    new_wm->wm[2].blocks, new_wm->wm[3].blocks,
2705 				    new_wm->wm[4].blocks, new_wm->wm[5].blocks,
2706 				    new_wm->wm[6].blocks, new_wm->wm[7].blocks,
2707 				    new_wm->trans_wm.blocks,
2708 				    new_wm->sagv.wm0.blocks,
2709 				    new_wm->sagv.trans_wm.blocks);
2710 
2711 			drm_dbg_kms(&i915->drm,
2712 				    "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
2713 				    " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
2714 				    plane->base.base.id, plane->base.name,
2715 				    old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
2716 				    old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
2717 				    old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
2718 				    old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
2719 				    old_wm->trans_wm.min_ddb_alloc,
2720 				    old_wm->sagv.wm0.min_ddb_alloc,
2721 				    old_wm->sagv.trans_wm.min_ddb_alloc,
2722 				    new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
2723 				    new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
2724 				    new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
2725 				    new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
2726 				    new_wm->trans_wm.min_ddb_alloc,
2727 				    new_wm->sagv.wm0.min_ddb_alloc,
2728 				    new_wm->sagv.trans_wm.min_ddb_alloc);
2729 		}
2730 	}
2731 }
2732 
skl_plane_selected_wm_equals(struct intel_plane * plane,const struct skl_pipe_wm * old_pipe_wm,const struct skl_pipe_wm * new_pipe_wm)2733 static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
2734 					 const struct skl_pipe_wm *old_pipe_wm,
2735 					 const struct skl_pipe_wm *new_pipe_wm)
2736 {
2737 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
2738 	int level;
2739 
2740 	for (level = 0; level < i915->display.wm.num_levels; level++) {
2741 		/*
2742 		 * We don't check uv_wm as the hardware doesn't actually
2743 		 * use it. It only gets used for calculating the required
2744 		 * ddb allocation.
2745 		 */
2746 		if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
2747 					 skl_plane_wm_level(new_pipe_wm, plane->id, level)))
2748 			return false;
2749 	}
2750 
2751 	if (HAS_HW_SAGV_WM(i915)) {
2752 		const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
2753 		const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
2754 
2755 		if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
2756 		    !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
2757 			return false;
2758 	}
2759 
2760 	return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
2761 				   skl_plane_trans_wm(new_pipe_wm, plane->id));
2762 }
2763 
2764 /*
2765  * To make sure the cursor watermark registers are always consistent
2766  * with our computed state the following scenario needs special
2767  * treatment:
2768  *
2769  * 1. enable cursor
2770  * 2. move cursor entirely offscreen
2771  * 3. disable cursor
2772  *
2773  * Step 2. does call .disable_plane() but does not zero the watermarks
2774  * (since we consider an offscreen cursor still active for the purposes
2775  * of watermarks). Step 3. would not normally call .disable_plane()
2776  * because the actual plane visibility isn't changing, and we don't
2777  * deallocate the cursor ddb until the pipe gets disabled. So we must
2778  * force step 3. to call .disable_plane() to update the watermark
2779  * registers properly.
2780  *
2781  * Other planes do not suffer from this issue as their watermarks are
2782  * calculated based on the actual plane visibility. The only time this
2783  * can trigger for the other planes is during the initial readout as the
2784  * default value of the watermarks registers is not zero.
2785  */
skl_wm_add_affected_planes(struct intel_atomic_state * state,struct intel_crtc * crtc)2786 static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
2787 				      struct intel_crtc *crtc)
2788 {
2789 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2790 	const struct intel_crtc_state *old_crtc_state =
2791 		intel_atomic_get_old_crtc_state(state, crtc);
2792 	struct intel_crtc_state *new_crtc_state =
2793 		intel_atomic_get_new_crtc_state(state, crtc);
2794 	struct intel_plane *plane;
2795 
2796 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
2797 		struct intel_plane_state *plane_state;
2798 		enum plane_id plane_id = plane->id;
2799 
2800 		/*
2801 		 * Force a full wm update for every plane on modeset.
2802 		 * Required because the reset value of the wm registers
2803 		 * is non-zero, whereas we want all disabled planes to
2804 		 * have zero watermarks. So if we turn off the relevant
2805 		 * power well the hardware state will go out of sync
2806 		 * with the software state.
2807 		 */
2808 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
2809 		    skl_plane_selected_wm_equals(plane,
2810 						 &old_crtc_state->wm.skl.optimal,
2811 						 &new_crtc_state->wm.skl.optimal))
2812 			continue;
2813 
2814 		if (new_crtc_state->do_async_flip) {
2815 			drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
2816 				    plane->base.base.id, plane->base.name);
2817 			return -EINVAL;
2818 		}
2819 
2820 		plane_state = intel_atomic_get_plane_state(state, plane);
2821 		if (IS_ERR(plane_state))
2822 			return PTR_ERR(plane_state);
2823 
2824 		new_crtc_state->update_planes |= BIT(plane_id);
2825 		new_crtc_state->async_flip_planes = 0;
2826 		new_crtc_state->do_async_flip = false;
2827 	}
2828 
2829 	return 0;
2830 }
2831 
2832 /*
2833  * For fixed refresh rate, or for the VRR case where Vmin == Vmax == Flipline:
2834  * program DEEP PKG_C_LATENCY Pkg C with the highest valid latency from
2835  * watermark level 1 and up. If watermark level 1 is invalid, program it
2836  * with all 1's.
2837  * Program PKG_C_LATENCY Added Wake Time = DSB execution time.
2838  * For variable refresh rate where Vmin != Vmax != Flipline:
2839  * program DEEP PKG_C_LATENCY Pkg C with all 1's.
2840  * Program PKG_C_LATENCY Added Wake Time = 0.
2841  */
2842 static void
skl_program_dpkgc_latency(struct drm_i915_private * i915,bool enable_dpkgc)2843 skl_program_dpkgc_latency(struct drm_i915_private *i915, bool enable_dpkgc)
2844 {
2845 	u32 max_latency = 0;
2846 	u32 clear = 0, val = 0;
2847 	u32 added_wake_time = 0;
2848 
2849 	if (DISPLAY_VER(i915) < 20)
2850 		return;
2851 
2852 	if (enable_dpkgc) {
2853 		max_latency = skl_watermark_max_latency(i915, 1);
2854 		if (max_latency == 0)
2855 			max_latency = LNL_PKG_C_LATENCY_MASK;
2856 		added_wake_time = DSB_EXE_TIME +
2857 			i915->display.sagv.block_time_us;
2858 	} else {
2859 		max_latency = LNL_PKG_C_LATENCY_MASK;
2860 		added_wake_time = 0;
2861 	}
2862 
2863 	clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
2864 	val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency);
2865 	val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
2866 
2867 	intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val);
2868 }
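
/*
 * Illustrative example (hypothetical numbers): with dpkgc enabled, a
 * level 1 max latency of 20 us and a SAGV block time of 20 us, the rmw
 * above programs the latency field to 20 and the added wake time to
 * DSB_EXE_TIME + 20 = 120 us. With dpkgc disabled, the latency field is
 * programmed to all 1's and the added wake time to 0.
 */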
2869 
2870 static int
skl_compute_wm(struct intel_atomic_state * state)2871 skl_compute_wm(struct intel_atomic_state *state)
2872 {
2873 	struct intel_crtc *crtc;
2874 	struct intel_crtc_state __maybe_unused *new_crtc_state;
2875 	int ret, i;
2876 	bool enable_dpkgc = false;
2877 
2878 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2879 		ret = skl_build_pipe_wm(state, crtc);
2880 		if (ret)
2881 			return ret;
2882 	}
2883 
2884 	ret = skl_compute_ddb(state);
2885 	if (ret)
2886 		return ret;
2887 
2888 	ret = intel_compute_sagv_mask(state);
2889 	if (ret)
2890 		return ret;
2891 
2892 	/*
2893 	 * skl_compute_ddb() will have adjusted the final watermarks
2894 	 * based on how much ddb is available. Now we can actually
2895 	 * check if the final watermarks changed.
2896 	 */
2897 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
2898 		ret = skl_wm_add_affected_planes(state, crtc);
2899 		if (ret)
2900 			return ret;
2901 
2902 		if ((new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
2903 		     new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline) ||
2904 		    !new_crtc_state->vrr.enable)
2905 			enable_dpkgc = true;
2906 	}
2907 
2908 	skl_program_dpkgc_latency(to_i915(state->base.dev), enable_dpkgc);
2909 
2910 	skl_print_wm_changes(state);
2911 
2912 	return 0;
2913 }
2914 
skl_wm_level_from_reg_val(u32 val,struct skl_wm_level * level)2915 static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
2916 {
2917 	level->enable = val & PLANE_WM_EN;
2918 	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
2919 	level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
2920 	level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
2921 }
2922 
skl_pipe_wm_get_hw_state(struct intel_crtc * crtc,struct skl_pipe_wm * out)2923 static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
2924 				     struct skl_pipe_wm *out)
2925 {
2926 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2927 	enum pipe pipe = crtc->pipe;
2928 	enum plane_id plane_id;
2929 	int level;
2930 	u32 val;
2931 
2932 	for_each_plane_id_on_crtc(crtc, plane_id) {
2933 		struct skl_plane_wm *wm = &out->planes[plane_id];
2934 
2935 		for (level = 0; level < i915->display.wm.num_levels; level++) {
2936 			if (plane_id != PLANE_CURSOR)
2937 				val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
2938 			else
2939 				val = intel_de_read(i915, CUR_WM(pipe, level));
2940 
2941 			skl_wm_level_from_reg_val(val, &wm->wm[level]);
2942 		}
2943 
2944 		if (plane_id != PLANE_CURSOR)
2945 			val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
2946 		else
2947 			val = intel_de_read(i915, CUR_WM_TRANS(pipe));
2948 
2949 		skl_wm_level_from_reg_val(val, &wm->trans_wm);
2950 
2951 		if (HAS_HW_SAGV_WM(i915)) {
2952 			if (plane_id != PLANE_CURSOR)
2953 				val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
2954 			else
2955 				val = intel_de_read(i915, CUR_WM_SAGV(pipe));
2956 
2957 			skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
2958 
2959 			if (plane_id != PLANE_CURSOR)
2960 				val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
2961 			else
2962 				val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
2963 
2964 			skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
2965 		} else if (DISPLAY_VER(i915) >= 12) {
2966 			wm->sagv.wm0 = wm->wm[0];
2967 			wm->sagv.trans_wm = wm->trans_wm;
2968 		}
2969 	}
2970 }
2971 
skl_wm_get_hw_state(struct drm_i915_private * i915)2972 static void skl_wm_get_hw_state(struct drm_i915_private *i915)
2973 {
2974 	struct intel_dbuf_state *dbuf_state =
2975 		to_intel_dbuf_state(i915->display.dbuf.obj.state);
2976 	struct intel_crtc *crtc;
2977 
2978 	if (HAS_MBUS_JOINING(i915))
2979 		dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
2980 
2981 	dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw);
2982 
2983 	for_each_intel_crtc(&i915->drm, crtc) {
2984 		struct intel_crtc_state *crtc_state =
2985 			to_intel_crtc_state(crtc->base.state);
2986 		enum pipe pipe = crtc->pipe;
2987 		unsigned int mbus_offset;
2988 		enum plane_id plane_id;
2989 		u8 slices;
2990 
2991 		memset(&crtc_state->wm.skl.optimal, 0,
2992 		       sizeof(crtc_state->wm.skl.optimal));
2993 		if (crtc_state->hw.active)
2994 			skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
2995 		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
2996 
2997 		memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
2998 
2999 		for_each_plane_id_on_crtc(crtc, plane_id) {
3000 			struct skl_ddb_entry *ddb =
3001 				&crtc_state->wm.skl.plane_ddb[plane_id];
3002 			struct skl_ddb_entry *ddb_y =
3003 				&crtc_state->wm.skl.plane_ddb_y[plane_id];
3004 
3005 			if (!crtc_state->hw.active)
3006 				continue;
3007 
3008 			skl_ddb_get_hw_plane_state(i915, crtc->pipe,
3009 						   plane_id, ddb, ddb_y);
3010 
3011 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
3012 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
3013 		}
3014 
3015 		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
3016 
3017 		/*
3018 		 * Used for checking overlaps, so we need absolute
3019 		 * offsets instead of MBUS relative offsets.
3020 		 */
3021 		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
3022 						 dbuf_state->joined_mbus);
3023 		mbus_offset = mbus_ddb_offset(i915, slices);
3024 		crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
3025 		crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
3026 
3027 		/* The slices actually used by the planes on the pipe */
3028 		dbuf_state->slices[pipe] =
3029 			skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
3030 
3031 		drm_dbg_kms(&i915->drm,
3032 			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
3033 			    crtc->base.base.id, crtc->base.name,
3034 			    dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
3035 			    dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
3036 			    str_yes_no(dbuf_state->joined_mbus));
3037 	}
3038 
3039 	dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
3040 }
3041 
skl_dbuf_is_misconfigured(struct drm_i915_private * i915)3042 static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
3043 {
3044 	const struct intel_dbuf_state *dbuf_state =
3045 		to_intel_dbuf_state(i915->display.dbuf.obj.state);
3046 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
3047 	struct intel_crtc *crtc;
3048 
3049 	for_each_intel_crtc(&i915->drm, crtc) {
3050 		const struct intel_crtc_state *crtc_state =
3051 			to_intel_crtc_state(crtc->base.state);
3052 
3053 		entries[crtc->pipe] = crtc_state->wm.skl.ddb;
3054 	}
3055 
3056 	for_each_intel_crtc(&i915->drm, crtc) {
3057 		const struct intel_crtc_state *crtc_state =
3058 			to_intel_crtc_state(crtc->base.state);
3059 		u8 slices;
3060 
3061 		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
3062 						 dbuf_state->joined_mbus);
3063 		if (dbuf_state->slices[crtc->pipe] & ~slices)
3064 			return true;
3065 
3066 		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
3067 						I915_MAX_PIPES, crtc->pipe))
3068 			return true;
3069 	}
3070 
3071 	return false;
3072 }
3073 
skl_wm_sanitize(struct drm_i915_private * i915)3074 static void skl_wm_sanitize(struct drm_i915_private *i915)
3075 {
3076 	struct intel_crtc *crtc;
3077 
3078 	/*
3079 	 * On TGL/RKL (at least) the BIOS likes to assign the planes
3080 	 * to the wrong DBUF slices. This will cause an infinite loop
3081 	 * in skl_commit_modeset_enables() as it can't find a way to
3082 	 * transition between the old bogus DBUF layout to the new
3083 	 * proper DBUF layout without DBUF allocation overlaps between
3084 	 * the planes (which cannot be allowed or else the hardware
3085 	 * may hang). If we detect a bogus DBUF layout just turn off
3086 	 * all the planes so that skl_commit_modeset_enables() can
3087 	 * simply ignore them.
3088 	 */
3089 	if (!skl_dbuf_is_misconfigured(i915))
3090 		return;
3091 
3092 	drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
3093 
3094 	for_each_intel_crtc(&i915->drm, crtc) {
3095 		struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3096 		const struct intel_plane_state *plane_state =
3097 			to_intel_plane_state(plane->base.state);
3098 		struct intel_crtc_state *crtc_state =
3099 			to_intel_crtc_state(crtc->base.state);
3100 
3101 		if (plane_state->uapi.visible)
3102 			intel_plane_disable_noatomic(crtc, plane);
3103 
3104 		drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
3105 
3106 		memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
3107 	}
3108 }
3109 
skl_wm_get_hw_state_and_sanitize(struct drm_i915_private * i915)3110 static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
3111 {
3112 	skl_wm_get_hw_state(i915);
3113 	skl_wm_sanitize(i915);
3114 }
3115 
intel_wm_state_verify(struct intel_atomic_state * state,struct intel_crtc * crtc)3116 void intel_wm_state_verify(struct intel_atomic_state *state,
3117 			   struct intel_crtc *crtc)
3118 {
3119 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3120 	const struct intel_crtc_state *new_crtc_state =
3121 		intel_atomic_get_new_crtc_state(state, crtc);
3122 	struct skl_hw_state {
3123 		struct skl_ddb_entry ddb[I915_MAX_PLANES];
3124 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
3125 		struct skl_pipe_wm wm;
3126 	} *hw;
3127 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
3128 	struct intel_plane *plane;
3129 	u8 hw_enabled_slices;
3130 	int level;
3131 
3132 	if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
3133 		return;
3134 
3135 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3136 	if (!hw)
3137 		return;
3138 
3139 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
3140 
3141 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
3142 
3143 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
3144 
3145 	if (DISPLAY_VER(i915) >= 11 &&
3146 	    hw_enabled_slices != i915->display.dbuf.enabled_slices)
3147 		drm_err(&i915->drm,
3148 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
3149 			i915->display.dbuf.enabled_slices,
3150 			hw_enabled_slices);
3151 
3152 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
3153 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
3154 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
3155 
3156 		/* Watermarks */
3157 		for (level = 0; level < i915->display.wm.num_levels; level++) {
3158 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
3159 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
3160 
3161 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
3162 				continue;
3163 
3164 			drm_err(&i915->drm,
3165 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3166 				plane->base.base.id, plane->base.name, level,
3167 				sw_wm_level->enable,
3168 				sw_wm_level->blocks,
3169 				sw_wm_level->lines,
3170 				hw_wm_level->enable,
3171 				hw_wm_level->blocks,
3172 				hw_wm_level->lines);
3173 		}
3174 
3175 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
3176 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
3177 
3178 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3179 			drm_err(&i915->drm,
3180 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3181 				plane->base.base.id, plane->base.name,
3182 				sw_wm_level->enable,
3183 				sw_wm_level->blocks,
3184 				sw_wm_level->lines,
3185 				hw_wm_level->enable,
3186 				hw_wm_level->blocks,
3187 				hw_wm_level->lines);
3188 		}
3189 
3190 		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
3191 		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
3192 
3193 		if (HAS_HW_SAGV_WM(i915) &&
3194 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3195 			drm_err(&i915->drm,
3196 				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3197 				plane->base.base.id, plane->base.name,
3198 				sw_wm_level->enable,
3199 				sw_wm_level->blocks,
3200 				sw_wm_level->lines,
3201 				hw_wm_level->enable,
3202 				hw_wm_level->blocks,
3203 				hw_wm_level->lines);
3204 		}
3205 
3206 		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
3207 		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
3208 
3209 		if (HAS_HW_SAGV_WM(i915) &&
3210 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
3211 			drm_err(&i915->drm,
3212 				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
3213 				plane->base.base.id, plane->base.name,
3214 				sw_wm_level->enable,
3215 				sw_wm_level->blocks,
3216 				sw_wm_level->lines,
3217 				hw_wm_level->enable,
3218 				hw_wm_level->blocks,
3219 				hw_wm_level->lines);
3220 		}
3221 
3222 		/* DDB */
3223 		hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
3224 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
3225 
3226 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
3227 			drm_err(&i915->drm,
3228 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
3229 				plane->base.base.id, plane->base.name,
3230 				sw_ddb_entry->start, sw_ddb_entry->end,
3231 				hw_ddb_entry->start, hw_ddb_entry->end);
3232 		}
3233 	}
3234 
3235 	kfree(hw);
3236 }
3237 
3238 bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
3239 {
3240 	return i915->display.wm.ipc_enabled;
3241 }
3242 
3243 void skl_watermark_ipc_update(struct drm_i915_private *i915)
3244 {
3245 	if (!HAS_IPC(i915))
3246 		return;
3247 
3248 	intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
3249 		     skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
3250 }
3251 
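/*
 * IPC (Isochronous Priority Control) stays disabled on SKL, is only
 * enabled with a symmetric memory configuration on KBL/CFL/CML, and
 * defaults to enabled on everything else that has IPC.
 */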
3252 static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
3253 {
3254 	/* Display WA #0477 WaDisableIPC: skl */
3255 	if (IS_SKYLAKE(i915))
3256 		return false;
3257 
3258 	/* Display WA #1141: SKL:all KBL:all CFL */
3259 	if (IS_KABYLAKE(i915) ||
3260 	    IS_COFFEELAKE(i915) ||
3261 	    IS_COMETLAKE(i915))
3262 		return i915->dram_info.symmetric_memory;
3263 
3264 	return true;
3265 }
3266 
3267 void skl_watermark_ipc_init(struct drm_i915_private *i915)
3268 {
3269 	if (!HAS_IPC(i915))
3270 		return;
3271 
3272 	i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
3273 
3274 	skl_watermark_ipc_update(i915);
3275 }
3276 
3277 static void
3278 adjust_wm_latency(struct drm_i915_private *i915,
3279 		  u16 wm[], int num_levels, int read_latency)
3280 {
3281 	bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
3282 	int i, level;
3283 
3284 	/*
3285 	 * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
3286 	 * need to be disabled. We make sure to sanitize the values out
3287 	 * of the punit to satisfy this requirement.
3288 	 */
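	/*
	 * Illustrative example (hypothetical punit values): wm = {5, 8, 0, 12}
	 * is sanitized to {5, 8, 0, 0} and num_levels is clamped to 2, so the
	 * bogus levels never get used.
	 */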
3289 	for (level = 1; level < num_levels; level++) {
3290 		if (wm[level] == 0) {
3291 			for (i = level + 1; i < num_levels; i++)
3292 				wm[i] = 0;
3293 
3294 			num_levels = level;
3295 			break;
3296 		}
3297 	}
3298 
3299 	/*
3300 	 * WaWmMemoryReadLatency
3301 	 *
3302 	 * The punit doesn't take the read latency into account, so we
3303 	 * need to add a proper adjustment to each valid level we retrieve
3304 	 * from the punit when the level 0 response data is 0us.
3305 	 */
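	/*
	 * E.g. (hypothetical values) with read_latency = 3 and punit
	 * latencies {0, 4, 10, 20}, the adjusted result is {3, 7, 13, 23}.
	 */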
3306 	if (wm[0] == 0) {
3307 		for (level = 0; level < num_levels; level++)
3308 			wm[level] += read_latency;
3309 	}
3310 
3311 	/*
3312 	 * WA Level-0 adjustment for 16GB DIMMs: SKL+
3313 	 * If we could not get the DIMM info, assume a 16GB DIMM and
3314 	 * enable this WA to prevent any underruns; assuming the largest
3315 	 * DIMM is the conservative choice when the real size is unknown.
3316 	 */
3317 	if (wm_lv_0_adjust_needed)
3318 		wm[0] += 1;
3319 }
3320 
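/*
 * MTL and later report the memory latencies directly in the
 * MTL_LATENCY_LP* registers (two levels per register) instead of going
 * through the pcode mailbox; a fixed read latency of 6 us is used for
 * the adjustment.
 */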
3321 static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3322 {
3323 	int num_levels = i915->display.wm.num_levels;
3324 	u32 val;
3325 
3326 	val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
3327 	wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3328 	wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3329 
3330 	val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
3331 	wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3332 	wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3333 
3334 	val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
3335 	wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
3336 	wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
3337 
3338 	adjust_wm_latency(i915, wm, num_levels, 6);
3339 }
3340 
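/*
 * Pre-MTL platforms report the eight memory latency levels via the
 * GEN9_PCODE_READ_MEM_LATENCY mailbox, four levels per read (data0 == 0
 * for levels 0-3, data0 == 1 for levels 4-7); the raw values are doubled
 * on DG2.
 */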
3341 static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
3342 {
3343 	int num_levels = i915->display.wm.num_levels;
3344 	int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
3345 	int mult = IS_DG2(i915) ? 2 : 1;
3346 	u32 val;
3347 	int ret;
3348 
3349 	/* read the first set of memory latencies[0:3] */
3350 	val = 0; /* data0 to be programmed to 0 for first set */
3351 	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3352 	if (ret) {
3353 		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
3354 		return;
3355 	}
3356 
3357 	wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3358 	wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3359 	wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3360 	wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3361 
3362 	/* read the second set of memory latencies[4:7] */
3363 	val = 1; /* data0 to be programmed to 1 for second set */
3364 	ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
3365 	if (ret) {
3366 		drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
3367 		return;
3368 	}
3369 
3370 	wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
3371 	wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
3372 	wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
3373 	wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
3374 
3375 	adjust_wm_latency(i915, wm, num_levels, read_latency);
3376 }
3377 
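/*
 * Pick the number of watermark levels (6 when the hardware has dedicated
 * SAGV watermark registers, 8 otherwise), read the latencies from the
 * hardware/pcode and dump them to the log.
 */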
3378 static void skl_setup_wm_latency(struct drm_i915_private *i915)
3379 {
3380 	if (HAS_HW_SAGV_WM(i915))
3381 		i915->display.wm.num_levels = 6;
3382 	else
3383 		i915->display.wm.num_levels = 8;
3384 
3385 	if (DISPLAY_VER(i915) >= 14)
3386 		mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
3387 	else
3388 		skl_read_wm_latency(i915, i915->display.wm.skl_latency);
3389 
3390 	intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
3391 }
3392 
3393 static const struct intel_wm_funcs skl_wm_funcs = {
3394 	.compute_global_watermarks = skl_compute_wm,
3395 	.get_hw_state = skl_wm_get_hw_state_and_sanitize,
3396 };
3397 
3398 void skl_wm_init(struct drm_i915_private *i915)
3399 {
3400 	intel_sagv_init(i915);
3401 
3402 	skl_setup_wm_latency(i915);
3403 
3404 	i915->display.funcs.wm = &skl_wm_funcs;
3405 }
3406 
3407 static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
3408 {
3409 	struct intel_dbuf_state *dbuf_state;
3410 
3411 	dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
3412 	if (!dbuf_state)
3413 		return NULL;
3414 
3415 	return &dbuf_state->base;
3416 }
3417 
3418 static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
3419 				     struct intel_global_state *state)
3420 {
3421 	kfree(state);
3422 }
3423 
3424 static const struct intel_global_state_funcs intel_dbuf_funcs = {
3425 	.atomic_duplicate_state = intel_dbuf_duplicate_state,
3426 	.atomic_destroy_state = intel_dbuf_destroy_state,
3427 };
3428 
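/*
 * Illustrative usage from an atomic check path (a sketch mirroring
 * intel_dbuf_state_set_mdclk_cdclk_ratio() below):
 *
 *	dbuf_state = intel_atomic_get_dbuf_state(state);
 *	if (IS_ERR(dbuf_state))
 *		return PTR_ERR(dbuf_state);
 *
 *	dbuf_state->mdclk_cdclk_ratio = ratio;
 *
 *	return intel_atomic_lock_global_state(&dbuf_state->base);
 */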
3429 struct intel_dbuf_state *
3430 intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
3431 {
3432 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3433 	struct intel_global_state *dbuf_state;
3434 
3435 	dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
3436 	if (IS_ERR(dbuf_state))
3437 		return ERR_CAST(dbuf_state);
3438 
3439 	return to_intel_dbuf_state(dbuf_state);
3440 }
3441 
3442 int intel_dbuf_init(struct drm_i915_private *i915)
3443 {
3444 	struct intel_dbuf_state *dbuf_state;
3445 
3446 	dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
3447 	if (!dbuf_state)
3448 		return -ENOMEM;
3449 
3450 	intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
3451 				     &dbuf_state->base, &intel_dbuf_funcs);
3452 
3453 	return 0;
3454 }
3455 
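/*
 * As the pipe pairing below implies, pipes A/D share one DBUF bank and
 * pipes B/C the other on Xe_LPD+; return true when the given pipe is the
 * only active pipe on its bank.
 */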
3456 static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
3457 {
3458 	switch (pipe) {
3459 	case PIPE_A:
3460 		return !(active_pipes & BIT(PIPE_D));
3461 	case PIPE_D:
3462 		return !(active_pipes & BIT(PIPE_A));
3463 	case PIPE_B:
3464 		return !(active_pipes & BIT(PIPE_C));
3465 	case PIPE_C:
3466 		return !(active_pipes & BIT(PIPE_B));
3467 	default: /* to suppress compiler warning */
3468 		MISSING_CASE(pipe);
3469 		break;
3470 	}
3471 
3472 	return false;
3473 }
3474 
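/*
 * Program the per-pipe MBUS DBOX credit settings. The credit values
 * depend on the display version, on whether MBUS joining is in use and,
 * on display 14.0 (MTL), on whether the pipe is alone on its DBUF bank.
 */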
3475 static void intel_mbus_dbox_update(struct intel_atomic_state *state)
3476 {
3477 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3478 	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
3479 	const struct intel_crtc *crtc;
3480 	u32 val = 0;
3481 
3482 	if (DISPLAY_VER(i915) < 11)
3483 		return;
3484 
3485 	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
3486 	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
3487 	if (!new_dbuf_state ||
3488 	    (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
3489 	     new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
3490 		return;
3491 
3492 	if (DISPLAY_VER(i915) >= 14)
3493 		val |= MBUS_DBOX_I_CREDIT(2);
3494 
3495 	if (DISPLAY_VER(i915) >= 12) {
3496 		val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
3497 		val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
3498 		val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
3499 	}
3500 
3501 	if (DISPLAY_VER(i915) >= 14)
3502 		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
3503 						     MBUS_DBOX_A_CREDIT(8);
3504 	else if (IS_ALDERLAKE_P(i915))
3505 		/* Wa_22010947358:adl-p */
3506 		val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
3507 						     MBUS_DBOX_A_CREDIT(4);
3508 	else
3509 		val |= MBUS_DBOX_A_CREDIT(2);
3510 
3511 	if (DISPLAY_VER(i915) >= 14) {
3512 		val |= MBUS_DBOX_B_CREDIT(0xA);
3513 	} else if (IS_ALDERLAKE_P(i915)) {
3514 		val |= MBUS_DBOX_BW_CREDIT(2);
3515 		val |= MBUS_DBOX_B_CREDIT(8);
3516 	} else if (DISPLAY_VER(i915) >= 12) {
3517 		val |= MBUS_DBOX_BW_CREDIT(2);
3518 		val |= MBUS_DBOX_B_CREDIT(12);
3519 	} else {
3520 		val |= MBUS_DBOX_BW_CREDIT(1);
3521 		val |= MBUS_DBOX_B_CREDIT(8);
3522 	}
3523 
3524 	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) {
3525 		u32 pipe_val = val;
3526 
3527 		if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
3528 			if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
3529 							      new_dbuf_state->active_pipes))
3530 				pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
3531 			else
3532 				pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
3533 		}
3534 
3535 		intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
3536 	}
3537 }
3538 
3539 int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
3540 					   int ratio)
3541 {
3542 	struct intel_dbuf_state *dbuf_state;
3543 
3544 	dbuf_state = intel_atomic_get_dbuf_state(state);
3545 	if (IS_ERR(dbuf_state))
3546 		return PTR_ERR(dbuf_state);
3547 
3548 	dbuf_state->mdclk_cdclk_ratio = ratio;
3549 
3550 	return intel_atomic_lock_global_state(&dbuf_state->base);
3551 }
3552 
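/*
 * Propagate the MDCLK/CDCLK ratio to the hardware: display version 20+
 * also programs the MBUS translation throttle minimum, and every DBUF
 * slice gets its minimum tracker state service field updated (using
 * twice the ratio while MBUS joining is active). Both fields encode the
 * value as ratio - 1.
 */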
3553 void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
3554 					 int ratio, bool joined_mbus)
3555 {
3556 	enum dbuf_slice slice;
3557 
3558 	if (!HAS_MBUS_JOINING(i915))
3559 		return;
3560 
3561 	if (DISPLAY_VER(i915) >= 20)
3562 		intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
3563 			     MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
3564 
3565 	if (joined_mbus)
3566 		ratio *= 2;
3567 
3568 	drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
3569 		    ratio, str_yes_no(joined_mbus));
3570 
3571 	for_each_dbuf_slice(i915, slice)
3572 		intel_de_rmw(i915, DBUF_CTL_S(slice),
3573 			     DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
3574 			     DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
3575 }
3576 
3577 static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
3578 {
3579 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3580 	const struct intel_dbuf_state *old_dbuf_state =
3581 		intel_atomic_get_old_dbuf_state(state);
3582 	const struct intel_dbuf_state *new_dbuf_state =
3583 		intel_atomic_get_new_dbuf_state(state);
3584 	int mdclk_cdclk_ratio;
3585 
3586 	if (intel_cdclk_is_decreasing_later(state)) {
3587 		/* cdclk/mdclk will be changed later by intel_set_cdclk_post_plane_update() */
3588 		mdclk_cdclk_ratio = old_dbuf_state->mdclk_cdclk_ratio;
3589 	} else {
3590 		/* cdclk/mdclk already changed by intel_set_cdclk_pre_plane_update() */
3591 		mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
3592 	}
3593 
3594 	intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
3595 					    new_dbuf_state->joined_mbus);
3596 }
3597 
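/*
 * With MBUS joined exactly one pipe may be active; return that pipe if
 * it is not undergoing a full modeset in this commit (so MBUS_CTL pipe
 * select can point at it), otherwise INVALID_PIPE.
 */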
3598 static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
3599 					const struct intel_dbuf_state *dbuf_state)
3600 {
3601 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3602 	enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
3603 	const struct intel_crtc_state *new_crtc_state;
3604 	struct intel_crtc *crtc;
3605 
3606 	drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
3607 	drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
3608 
3609 	crtc = intel_crtc_for_pipe(i915, pipe);
3610 	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
3611 
3612 	if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state))
3613 		return pipe;
3614 	else
3615 		return INVALID_PIPE;
3616 }
3617 
3618 static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
3619 					enum pipe pipe)
3620 {
3621 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3622 	const struct intel_dbuf_state *old_dbuf_state =
3623 		intel_atomic_get_old_dbuf_state(state);
3624 	const struct intel_dbuf_state *new_dbuf_state =
3625 		intel_atomic_get_new_dbuf_state(state);
3626 	u32 mbus_ctl;
3627 
3628 	drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
3629 		    str_yes_no(old_dbuf_state->joined_mbus),
3630 		    str_yes_no(new_dbuf_state->joined_mbus),
3631 		    pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
3632 
3633 	if (new_dbuf_state->joined_mbus)
3634 		mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
3635 	else
3636 		mbus_ctl = MBUS_HASHING_MODE_2x2;
3637 
3638 	if (pipe != INVALID_PIPE)
3639 		mbus_ctl |= MBUS_JOIN_PIPE_SELECT(pipe);
3640 	else
3641 		mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
3642 
3643 	intel_de_rmw(i915, MBUS_CTL,
3644 		     MBUS_HASHING_MODE_MASK | MBUS_JOIN |
3645 		     MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
3646 }
3647 
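/*
 * When this commit turns MBUS joining on, the join (and the dependent
 * DBOX credit and min tracker updates) is performed here, before the new
 * DDB allocation is written; the un-join case is handled after the DDB
 * update in intel_dbuf_mbus_post_ddb_update().
 */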
3648 void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
3649 {
3650 	const struct intel_dbuf_state *new_dbuf_state =
3651 		intel_atomic_get_new_dbuf_state(state);
3652 	const struct intel_dbuf_state *old_dbuf_state =
3653 		intel_atomic_get_old_dbuf_state(state);
3654 
3655 	if (!new_dbuf_state)
3656 		return;
3657 
3658 	if (!old_dbuf_state->joined_mbus && new_dbuf_state->joined_mbus) {
3659 		enum pipe pipe = intel_mbus_joined_pipe(state, new_dbuf_state);
3660 
3661 		WARN_ON(!new_dbuf_state->base.changed);
3662 
3663 		intel_dbuf_mbus_join_update(state, pipe);
3664 		intel_mbus_dbox_update(state);
3665 		intel_dbuf_mdclk_min_tracker_update(state);
3666 	}
3667 }
3668 
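/*
 * Counterpart of intel_dbuf_mbus_pre_ddb_update(): the MBUS un-join is
 * done after the DDB reallocation (followed by a vblank wait on the
 * surviving pipe), and the DBOX credits / min tracker are also refreshed
 * when only the set of active pipes changed.
 */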
3669 void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
3670 {
3671 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3672 	const struct intel_dbuf_state *new_dbuf_state =
3673 		intel_atomic_get_new_dbuf_state(state);
3674 	const struct intel_dbuf_state *old_dbuf_state =
3675 		intel_atomic_get_old_dbuf_state(state);
3676 
3677 	if (!new_dbuf_state)
3678 		return;
3679 
3680 	if (old_dbuf_state->joined_mbus && !new_dbuf_state->joined_mbus) {
3681 		enum pipe pipe = intel_mbus_joined_pipe(state, old_dbuf_state);
3682 
3683 		WARN_ON(!new_dbuf_state->base.changed);
3684 
3685 		intel_dbuf_mdclk_min_tracker_update(state);
3686 		intel_mbus_dbox_update(state);
3687 		intel_dbuf_mbus_join_update(state, pipe);
3688 
3689 		if (pipe != INVALID_PIPE) {
3690 			struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
3691 
3692 			intel_crtc_wait_for_next_vblank(crtc);
3693 		}
3694 	} else if (old_dbuf_state->joined_mbus == new_dbuf_state->joined_mbus &&
3695 		   old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
3696 		WARN_ON(!new_dbuf_state->base.changed);
3697 
3698 		intel_dbuf_mdclk_min_tracker_update(state);
3699 		intel_mbus_dbox_update(state);
3700 	}
3702 }
3703 
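/*
 * Power up the union of the old and new DBUF slices before the planes
 * are updated, so both the old and the new DDB layout stay backed by
 * powered slices during the transition; intel_dbuf_post_plane_update()
 * then drops whatever the new state no longer needs. E.g. (hypothetical
 * masks) old = 0x3, new = 0x6: pre-update enables 0x7, post-update trims
 * back to 0x6.
 */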
3704 void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
3705 {
3706 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3707 	const struct intel_dbuf_state *new_dbuf_state =
3708 		intel_atomic_get_new_dbuf_state(state);
3709 	const struct intel_dbuf_state *old_dbuf_state =
3710 		intel_atomic_get_old_dbuf_state(state);
3711 	u8 old_slices, new_slices;
3712 
3713 	if (!new_dbuf_state)
3714 		return;
3715 
3716 	old_slices = old_dbuf_state->enabled_slices;
3717 	new_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
3718 
3719 	if (old_slices == new_slices)
3720 		return;
3721 
3722 	WARN_ON(!new_dbuf_state->base.changed);
3723 
3724 	gen9_dbuf_slices_update(i915, new_slices);
3725 }
3726 
3727 void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
3728 {
3729 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3730 	const struct intel_dbuf_state *new_dbuf_state =
3731 		intel_atomic_get_new_dbuf_state(state);
3732 	const struct intel_dbuf_state *old_dbuf_state =
3733 		intel_atomic_get_old_dbuf_state(state);
3734 	u8 old_slices, new_slices;
3735 
3736 	if (!new_dbuf_state)
3737 		return;
3738 
3739 	old_slices = old_dbuf_state->enabled_slices | new_dbuf_state->enabled_slices;
3740 	new_slices = new_dbuf_state->enabled_slices;
3741 
3742 	if (old_slices == new_slices)
3743 		return;
3744 
3745 	WARN_ON(!new_dbuf_state->base.changed);
3746 
3747 	gen9_dbuf_slices_update(i915, new_slices);
3748 }
3749 
3750 static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
3751 {
3752 	struct drm_i915_private *i915 = m->private;
3753 
3754 	seq_printf(m, "Isochronous Priority Control: %s\n",
3755 		   str_yes_no(skl_watermark_ipc_enabled(i915)));
3756 	return 0;
3757 }
3758 
3759 static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
3760 {
3761 	struct drm_i915_private *i915 = inode->i_private;
3762 
3763 	return single_open(file, skl_watermark_ipc_status_show, i915);
3764 }
3765 
3766 static ssize_t skl_watermark_ipc_status_write(struct file *file,
3767 					      const char __user *ubuf,
3768 					      size_t len, loff_t *offp)
3769 {
3770 	struct seq_file *m = file->private_data;
3771 	struct drm_i915_private *i915 = m->private;
3772 	intel_wakeref_t wakeref;
3773 	bool enable;
3774 	int ret;
3775 
3776 	ret = kstrtobool_from_user(ubuf, len, &enable);
3777 	if (ret < 0)
3778 		return ret;
3779 
3780 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
3781 		if (!skl_watermark_ipc_enabled(i915) && enable)
3782 			drm_info(&i915->drm,
3783 				 "Enabling IPC: WM will be proper only after next commit\n");
3784 		i915->display.wm.ipc_enabled = enable;
3785 		skl_watermark_ipc_update(i915);
3786 	}
3787 
3788 	return len;
3789 }
3790 
3791 static const struct file_operations skl_watermark_ipc_status_fops = {
3792 	.owner = THIS_MODULE,
3793 	.open = skl_watermark_ipc_status_open,
3794 	.read = seq_read,
3795 	.llseek = seq_lseek,
3796 	.release = single_release,
3797 	.write = skl_watermark_ipc_status_write
3798 };
3799 
3800 static int intel_sagv_status_show(struct seq_file *m, void *unused)
3801 {
3802 	struct drm_i915_private *i915 = m->private;
3803 	static const char * const sagv_status[] = {
3804 		[I915_SAGV_UNKNOWN] = "unknown",
3805 		[I915_SAGV_DISABLED] = "disabled",
3806 		[I915_SAGV_ENABLED] = "enabled",
3807 		[I915_SAGV_NOT_CONTROLLED] = "not controlled",
3808 	};
3809 
3810 	seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
3811 	seq_printf(m, "SAGV modparam: %s\n",
3812 		   str_enabled_disabled(i915->display.params.enable_sagv));
3813 	seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
3814 	seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
3815 
3816 	return 0;
3817 }
3818 
3819 DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
3820 
3821 void skl_watermark_debugfs_register(struct drm_i915_private *i915)
3822 {
3823 	struct drm_minor *minor = i915->drm.primary;
3824 
3825 	if (HAS_IPC(i915))
3826 		debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
3827 				    &skl_watermark_ipc_status_fops);
3828 
3829 	if (HAS_SAGV(i915))
3830 		debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
3831 				    &intel_sagv_status_fops);
3832 }
3833 
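/*
 * Return the latency of the highest watermark level at or above
 * initial_wm_level that has a non-zero latency, or 0 if all such levels
 * are disabled.
 */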
3834 unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
3835 {
3836 	int level;
3837 
3838 	for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
3839 		unsigned int latency = skl_wm_latency(i915, level, NULL);
3840 
3841 		if (latency)
3842 			return latency;
3843 	}
3844 
3845 	return 0;
3846 }
3847