xref: /linux/drivers/gpu/drm/i915/display/intel_atomic_plane.c (revision 569d7db70e5dcf13fbf072f10e9096577ac1e565)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /**
25  * DOC: atomic plane helpers
26  *
27  * The functions here are used by the atomic plane helper functions to
28  * implement legacy plane updates (i.e., drm_plane->update_plane() and
29  * drm_plane->disable_plane()).  This allows plane updates to use the
30  * atomic state infrastructure and perform plane updates as separate
31  * prepare/check/commit/cleanup steps.
32  */
33 
34 #include <linux/dma-fence-chain.h>
35 #include <linux/dma-resv.h>
36 
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_gem_atomic_helper.h>
39 #include <drm/drm_blend.h>
40 #include <drm/drm_fourcc.h>
41 
42 #include "i915_config.h"
43 #include "i9xx_plane_regs.h"
44 #include "intel_atomic_plane.h"
45 #include "intel_cdclk.h"
46 #include "intel_display_rps.h"
47 #include "intel_display_trace.h"
48 #include "intel_display_types.h"
49 #include "intel_fb.h"
50 #include "intel_fb_pin.h"
51 #include "skl_scaler.h"
52 #include "skl_watermark.h"
53 
54 static void intel_plane_state_reset(struct intel_plane_state *plane_state,
55 				    struct intel_plane *plane)
56 {
57 	memset(plane_state, 0, sizeof(*plane_state));
58 
59 	__drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base);
60 
61 	plane_state->scaler_id = -1;
62 }
63 
64 struct intel_plane *intel_plane_alloc(void)
65 {
66 	struct intel_plane_state *plane_state;
67 	struct intel_plane *plane;
68 
69 	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
70 	if (!plane)
71 		return ERR_PTR(-ENOMEM);
72 
73 	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
74 	if (!plane_state) {
75 		kfree(plane);
76 		return ERR_PTR(-ENOMEM);
77 	}
78 
79 	intel_plane_state_reset(plane_state, plane);
80 
81 	plane->base.state = &plane_state->uapi;
82 
83 	return plane;
84 }
85 
86 void intel_plane_free(struct intel_plane *plane)
87 {
88 	intel_plane_destroy_state(&plane->base, plane->base.state);
89 	kfree(plane);
90 }
91 
92 /**
93  * intel_plane_duplicate_state - duplicate plane state
94  * @plane: drm plane
95  *
96  * Allocates and returns a copy of the plane state (both common and
97  * Intel-specific) for the specified plane.
98  *
99  * Returns: The newly allocated plane state, or NULL on failure.
100  */
101 struct drm_plane_state *
102 intel_plane_duplicate_state(struct drm_plane *plane)
103 {
104 	struct intel_plane_state *intel_state;
105 
106 	intel_state = to_intel_plane_state(plane->state);
107 	intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL);
108 
109 	if (!intel_state)
110 		return NULL;
111 
112 	__drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);
113 
114 	intel_state->ggtt_vma = NULL;
115 	intel_state->dpt_vma = NULL;
116 	intel_state->flags = 0;
117 
118 	/* add reference to fb */
119 	if (intel_state->hw.fb)
120 		drm_framebuffer_get(intel_state->hw.fb);
121 
122 	return &intel_state->uapi;
123 }
124 
125 /**
126  * intel_plane_destroy_state - destroy plane state
127  * @plane: drm plane
128  * @state: state object to destroy
129  *
130  * Destroys the plane state (both common and Intel-specific) for the
131  * specified plane.
132  */
133 void
134 intel_plane_destroy_state(struct drm_plane *plane,
135 			  struct drm_plane_state *state)
136 {
137 	struct intel_plane_state *plane_state = to_intel_plane_state(state);
138 
139 	drm_WARN_ON(plane->dev, plane_state->ggtt_vma);
140 	drm_WARN_ON(plane->dev, plane_state->dpt_vma);
141 
142 	__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
143 	if (plane_state->hw.fb)
144 		drm_framebuffer_put(plane_state->hw.fb);
145 	kfree(plane_state);
146 }
147 
148 bool intel_plane_needs_physical(struct intel_plane *plane)
149 {
150 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
151 
152 	return plane->id == PLANE_CURSOR &&
153 		DISPLAY_INFO(i915)->cursor_needs_physical;
154 }
155 
/*
 * Scale @rate by the plane downscaling factor.
 *
 * @src is in 16.16 fixed point, @dst in integer pixels. The dst size
 * is clamped to the src size, so only downscaling (dst smaller than
 * src) increases the returned rate; upscaling leaves it unchanged.
 */
unsigned int intel_adjusted_rate(const struct drm_rect *src,
				 const struct drm_rect *dst,
				 unsigned int rate)
{
	unsigned int src_w, src_h, dst_w, dst_h;

	/* Convert src from 16.16 fixed point to whole pixels. */
	src_w = drm_rect_width(src) >> 16;
	src_h = drm_rect_height(src) >> 16;
	dst_w = drm_rect_width(dst);
	dst_h = drm_rect_height(dst);

	/* Downscaling limits the maximum pixel rate */
	dst_w = min(src_w, dst_w);
	dst_h = min(src_h, dst_h);

	/* 64bit math in the numerator to avoid overflowing rate * src area */
	return DIV_ROUND_UP_ULL(mul_u32_u32(rate, src_w * src_h),
				dst_w * dst_h);
}
174 
175 unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
176 				    const struct intel_plane_state *plane_state)
177 {
178 	/*
179 	 * Note we don't check for plane visibility here as
180 	 * we want to use this when calculating the cursor
181 	 * watermarks even if the cursor is fully offscreen.
182 	 * That depends on the src/dst rectangles being
183 	 * correctly populated whenever the watermark code
184 	 * considers the cursor to be visible, whether or not
185 	 * it is actually visible.
186 	 *
187 	 * See: intel_wm_plane_visible() and intel_check_cursor()
188 	 */
189 
190 	return intel_adjusted_rate(&plane_state->uapi.src,
191 				   &plane_state->uapi.dst,
192 				   crtc_state->pixel_rate);
193 }
194 
195 unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
196 				   const struct intel_plane_state *plane_state,
197 				   int color_plane)
198 {
199 	const struct drm_framebuffer *fb = plane_state->hw.fb;
200 
201 	if (!plane_state->uapi.visible)
202 		return 0;
203 
204 	return intel_plane_pixel_rate(crtc_state, plane_state) *
205 		fb->format->cpp[color_plane];
206 }
207 
208 static bool
209 use_min_ddb(const struct intel_crtc_state *crtc_state,
210 	    struct intel_plane *plane)
211 {
212 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
213 
214 	return DISPLAY_VER(i915) >= 13 &&
215 	       crtc_state->uapi.async_flip &&
216 	       plane->async_flip;
217 }
218 
219 static unsigned int
220 intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
221 			       const struct intel_plane_state *plane_state,
222 			       int color_plane)
223 {
224 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
225 	const struct drm_framebuffer *fb = plane_state->hw.fb;
226 	int width, height;
227 	unsigned int rel_data_rate;
228 
229 	if (plane->id == PLANE_CURSOR)
230 		return 0;
231 
232 	if (!plane_state->uapi.visible)
233 		return 0;
234 
235 	/*
236 	 * We calculate extra ddb based on ratio plane rate/total data rate
237 	 * in case, in some cases we should not allocate extra ddb for the plane,
238 	 * so do not count its data rate, if this is the case.
239 	 */
240 	if (use_min_ddb(crtc_state, plane))
241 		return 0;
242 
243 	/*
244 	 * Src coordinates are already rotated by 270 degrees for
245 	 * the 90/270 degree plane rotation cases (to match the
246 	 * GTT mapping), hence no need to account for rotation here.
247 	 */
248 	width = drm_rect_width(&plane_state->uapi.src) >> 16;
249 	height = drm_rect_height(&plane_state->uapi.src) >> 16;
250 
251 	/* UV plane does 1/2 pixel sub-sampling */
252 	if (color_plane == 1) {
253 		width /= 2;
254 		height /= 2;
255 	}
256 
257 	rel_data_rate = width * height * fb->format->cpp[color_plane];
258 
259 	return intel_adjusted_rate(&plane_state->uapi.src,
260 				   &plane_state->uapi.dst,
261 				   rel_data_rate);
262 }
263 
/*
 * Compute the minimum cdclk required by @plane for its new state and,
 * when it exceeds what the current cdclk state provides, request a
 * cdclk state recomputation via *need_cdclk_calc.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
			       struct intel_plane *plane,
			       bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
	const struct intel_cdclk_state *cdclk_state;
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;

	/* Invisible planes, or planes without a min_cdclk hook, impose no limit. */
	if (!plane_state->uapi.visible || !plane->min_cdclk)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	new_crtc_state->min_cdclk[plane->id] =
		plane->min_cdclk(new_crtc_state, plane_state);

	/*
	 * No need to check against the cdclk state if
	 * the min cdclk for the plane doesn't increase.
	 *
	 * Ie. we only ever increase the cdclk due to plane
	 * requirements. This can reduce back and forth
	 * display blinking due to constant cdclk changes.
	 */
	if (new_crtc_state->min_cdclk[plane->id] <=
	    old_crtc_state->min_cdclk[plane->id])
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	/*
	 * No need to recalculate the cdclk state if
	 * the min cdclk for the pipe doesn't increase.
	 *
	 * Ie. we only ever increase the cdclk due to plane
	 * requirements. This can reduce back and forth
	 * display blinking due to constant cdclk changes.
	 */
	if (new_crtc_state->min_cdclk[plane->id] <=
	    cdclk_state->min_cdclk[crtc->pipe])
		return 0;

	drm_dbg_kms(&dev_priv->drm,
		    "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
		    plane->base.base.id, plane->base.name,
		    new_crtc_state->min_cdclk[plane->id],
		    crtc->base.base.id, crtc->base.name,
		    cdclk_state->min_cdclk[crtc->pipe]);
	*need_cdclk_calc = true;

	return 0;
}
323 
324 static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
325 {
326 	if (plane_state->hw.fb)
327 		drm_framebuffer_put(plane_state->hw.fb);
328 
329 	memset(&plane_state->hw, 0, sizeof(plane_state->hw));
330 }
331 
/*
 * Copy the uapi (userspace visible) plane state of @from_plane_state
 * into the hw state of @plane_state, with @crtc being the crtc the
 * plane is actually enabled on.
 */
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
				       const struct intel_plane_state *from_plane_state,
				       struct intel_crtc *crtc)
{
	/* Release the old hw state (incl. its fb reference) first. */
	intel_plane_clear_hw_state(plane_state);

	/*
	 * For the joiner secondary uapi.crtc will point at
	 * the primary crtc. So we explicitly assign the right
	 * secondary crtc to hw.crtc. uapi.crtc!=NULL simply
	 * indicates the plane is logically enabled on the uapi level.
	 */
	plane_state->hw.crtc = from_plane_state->uapi.crtc ? &crtc->base : NULL;

	/* The hw state holds its own fb reference. */
	plane_state->hw.fb = from_plane_state->uapi.fb;
	if (plane_state->hw.fb)
		drm_framebuffer_get(plane_state->hw.fb);

	plane_state->hw.alpha = from_plane_state->uapi.alpha;
	plane_state->hw.pixel_blend_mode =
		from_plane_state->uapi.pixel_blend_mode;
	plane_state->hw.rotation = from_plane_state->uapi.rotation;
	plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
	plane_state->hw.color_range = from_plane_state->uapi.color_range;
	plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter;

	/* Also sync the src/dst rectangles from the source uapi state. */
	plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
}
361 
/*
 * Copy the hw plane state wholesale from @from_plane_state into
 * @plane_state, taking a new fb reference for the copy.
 */
void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
			       const struct intel_plane_state *from_plane_state)
{
	/* Release any fb currently referenced by the destination. */
	intel_plane_clear_hw_state(plane_state);

	memcpy(&plane_state->hw, &from_plane_state->hw,
	       sizeof(plane_state->hw));

	/* The copied hw state needs its own fb reference. */
	if (plane_state->hw.fb)
		drm_framebuffer_get(plane_state->hw.fb);
}
373 
374 void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
375 			       struct intel_plane_state *plane_state)
376 {
377 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
378 
379 	crtc_state->active_planes &= ~BIT(plane->id);
380 	crtc_state->scaled_planes &= ~BIT(plane->id);
381 	crtc_state->nv12_planes &= ~BIT(plane->id);
382 	crtc_state->c8_planes &= ~BIT(plane->id);
383 	crtc_state->async_flip_planes &= ~BIT(plane->id);
384 	crtc_state->data_rate[plane->id] = 0;
385 	crtc_state->data_rate_y[plane->id] = 0;
386 	crtc_state->rel_data_rate[plane->id] = 0;
387 	crtc_state->rel_data_rate_y[plane->id] = 0;
388 	crtc_state->min_cdclk[plane->id] = 0;
389 
390 	plane_state->uapi.visible = false;
391 }
392 
393 /* FIXME nuke when all wm code is atomic */
394 static bool intel_wm_need_update(const struct intel_plane_state *cur,
395 				 struct intel_plane_state *new)
396 {
397 	/* Update watermarks on tiling or size changes. */
398 	if (new->uapi.visible != cur->uapi.visible)
399 		return true;
400 
401 	if (!cur->hw.fb || !new->hw.fb)
402 		return false;
403 
404 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
405 	    cur->hw.rotation != new->hw.rotation ||
406 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
407 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
408 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
409 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
410 		return true;
411 
412 	return false;
413 }
414 
415 static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state)
416 {
417 	int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
418 	int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
419 	int dst_w = drm_rect_width(&plane_state->uapi.dst);
420 	int dst_h = drm_rect_height(&plane_state->uapi.dst);
421 
422 	return src_w != dst_w || src_h != dst_h;
423 }
424 
425 static bool intel_plane_do_async_flip(struct intel_plane *plane,
426 				      const struct intel_crtc_state *old_crtc_state,
427 				      const struct intel_crtc_state *new_crtc_state)
428 {
429 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
430 
431 	if (!plane->async_flip)
432 		return false;
433 
434 	if (!new_crtc_state->uapi.async_flip)
435 		return false;
436 
437 	/*
438 	 * In platforms after DISPLAY13, we might need to override
439 	 * first async flip in order to change watermark levels
440 	 * as part of optimization.
441 	 *
442 	 * And let's do this for all skl+ so that we can eg. change the
443 	 * modifier as well.
444 	 *
445 	 * TODO: For older platforms there is less reason to do this as
446 	 * only X-tile is supported with async flips, though we could
447 	 * extend this so other scanout parameters (stride/etc) could
448 	 * be changed as well...
449 	 */
450 	return DISPLAY_VER(i915) < 9 || old_crtc_state->uapi.async_flip;
451 }
452 
/*
 * Determine whether CxSR (self refresh) must be disabled around this
 * plane update on GMCH platforms: plane enable/disable always needs
 * it, and so do most plane control register changes while the plane
 * stays visible.
 */
static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state,
				   const struct intel_plane_state *old_plane_state,
				   const struct intel_plane_state *new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	bool old_visible = old_plane_state->uapi.visible;
	bool new_visible = new_plane_state->uapi.visible;
	u32 old_ctl = old_plane_state->ctl;
	u32 new_ctl = new_plane_state->ctl;
	bool modeset, turn_on, turn_off;

	/* Cursor updates don't require CxSR to be disabled. */
	if (plane->id == PLANE_CURSOR)
		return false;

	/* A modeset counts as both a turn off and a turn on. */
	modeset = intel_crtc_needs_modeset(new_crtc_state);
	turn_off = old_visible && (!new_visible || modeset);
	turn_on = new_visible && (!old_visible || modeset);

	/* Must disable CxSR around plane enable/disable */
	if (turn_on || turn_off)
		return true;

	/* Plane invisible in old or new state: ctl comparison is moot. */
	if (!old_visible || !new_visible)
		return false;

	/*
	 * Most plane control register updates are blocked while in CxSR.
	 *
	 * Tiling mode is one exception where the primary plane can
	 * apparently handle it, whereas the sprites can not (the
	 * sprite issue being only relevant on VLV/CHV where CxSR
	 * is actually possible with a sprite enabled).
	 */
	if (plane->id == PLANE_PRIMARY) {
		old_ctl &= ~DISP_TILED;
		new_ctl &= ~DISP_TILED;
	}

	/* Any remaining ctl bit difference requires CxSR off. */
	return old_ctl != new_ctl;
}
493 
/*
 * Work out how this plane update affects the new crtc state:
 * scaler assignment, watermark update flags, CxSR/LP watermark
 * disabling, frontbuffer bits and async flip bookkeeping.
 *
 * Returns 0 on success, or a negative error code from the scaler
 * setup.
 */
static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
					   struct intel_crtc_state *new_crtc_state,
					   const struct intel_plane_state *old_plane_state,
					   struct intel_plane_state *new_plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = new_crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* skl+ non-cursor planes may need a pipe scaler (de)assigned. */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = new_plane_state->uapi.visible;

	/* A plane can't have been visible on a crtc that was off. */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(new_crtc_state, new_plane_state);
		visible = false;
	}

	/* Invisible before and after: nothing more to do. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	/* Non-atomic wm handling for DISPLAY_VER < 5 (except g4x). */
	if (turn_on) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_pre = true;
	} else if (turn_off) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_post = true;
	} else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			new_crtc_state->update_wm_pre = true;
			new_crtc_state->update_wm_post = true;
		}
	}

	/* Track which frontbuffers are touched by this update. */
	if (visible || was_visible)
		new_crtc_state->fb_bits |= plane->frontbuffer_bit;

	if (HAS_GMCH(dev_priv) &&
	    i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
		new_crtc_state->disable_cxsr = true;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!intel_plane_is_scaled(old_plane_state) &&
			 intel_plane_is_scaled(new_plane_state))))
		new_crtc_state->disable_lp_wm = true;

	if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) {
		new_crtc_state->do_async_flip = true;
		new_crtc_state->async_flip_planes |= BIT(plane->id);
	} else if (plane->need_async_flip_toggle_wa &&
		   new_crtc_state->uapi.async_flip) {
		/*
		 * On platforms with double buffered async flip bit we
		 * set the bit already one frame early during the sync
		 * flip (see {i9xx,skl}_plane_update_arm()). The
		 * hardware will therefore be ready to perform a real
		 * async flip during the next commit, without having
		 * to wait yet another frame for the bit to latch.
		 */
		new_crtc_state->async_flip_planes |= BIT(plane->id);
	}

	return 0;
}
627 
/*
 * Run the full atomic check for one plane against the given old/new
 * crtc states: invoke the plane's ->check_plane() hook and update
 * the plane bitmasks and data rates tracked in the new crtc state.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state,
					const struct intel_plane_state *old_plane_state,
					struct intel_plane_state *new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	const struct drm_framebuffer *fb = new_plane_state->hw.fb;
	int ret;

	/* Start from a clean slate; the checks below re-add the plane. */
	intel_plane_set_invisible(new_crtc_state, new_plane_state);
	new_crtc_state->enabled_planes &= ~BIT(plane->id);

	/* Plane disabled in both old and new state: nothing to check. */
	if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
		return 0;

	ret = plane->check_plane(new_crtc_state, new_plane_state);
	if (ret)
		return ret;

	if (fb)
		new_crtc_state->enabled_planes |= BIT(plane->id);

	/* FIXME pre-g4x don't work like this */
	if (new_plane_state->uapi.visible)
		new_crtc_state->active_planes |= BIT(plane->id);

	if (new_plane_state->uapi.visible &&
	    intel_plane_is_scaled(new_plane_state))
		new_crtc_state->scaled_planes |= BIT(plane->id);

	if (new_plane_state->uapi.visible &&
	    intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		new_crtc_state->nv12_planes |= BIT(plane->id);

	if (new_plane_state->uapi.visible &&
	    fb->format->format == DRM_FORMAT_C8)
		new_crtc_state->c8_planes |= BIT(plane->id);

	/* Planes visible in either state need their registers updated. */
	if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
		new_crtc_state->update_planes |= BIT(plane->id);

	/* Planar YUV tracks Y (color plane 0) and UV (1) rates separately. */
	if (new_plane_state->uapi.visible &&
	    intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
		new_crtc_state->data_rate_y[plane->id] =
			intel_plane_data_rate(new_crtc_state, new_plane_state, 0);
		new_crtc_state->data_rate[plane->id] =
			intel_plane_data_rate(new_crtc_state, new_plane_state, 1);

		new_crtc_state->rel_data_rate_y[plane->id] =
			intel_plane_relative_data_rate(new_crtc_state,
						       new_plane_state, 0);
		new_crtc_state->rel_data_rate[plane->id] =
			intel_plane_relative_data_rate(new_crtc_state,
						       new_plane_state, 1);
	} else if (new_plane_state->uapi.visible) {
		new_crtc_state->data_rate[plane->id] =
			intel_plane_data_rate(new_crtc_state, new_plane_state, 0);

		new_crtc_state->rel_data_rate[plane->id] =
			intel_plane_relative_data_rate(new_crtc_state,
						       new_plane_state, 0);
	}

	return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
					       old_plane_state, new_plane_state);
}
694 
695 static struct intel_plane *
696 intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
697 {
698 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
699 	struct intel_plane *plane;
700 
701 	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
702 		if (plane->id == plane_id)
703 			return plane;
704 	}
705 
706 	return NULL;
707 }
708 
/*
 * Top level atomic check for one plane: synchronize the hw state
 * from the uapi state (taking the joiner into account) and, if the
 * plane's crtc is part of this commit, run the full check.
 */
int intel_plane_atomic_check(struct intel_atomic_state *state,
			     struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	const struct intel_plane_state *new_primary_crtc_plane_state;
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (new_crtc_state && intel_crtc_is_joiner_secondary(new_crtc_state)) {
		/*
		 * Joiner secondaries take their hw state from the
		 * corresponding plane on the primary crtc.
		 */
		struct intel_crtc *primary_crtc =
			intel_primary_crtc(new_crtc_state);
		struct intel_plane *primary_crtc_plane =
			intel_crtc_get_plane(primary_crtc, plane->id);

		new_primary_crtc_plane_state =
			intel_atomic_get_new_plane_state(state, primary_crtc_plane);
	} else {
		new_primary_crtc_plane_state = new_plane_state;
	}

	intel_plane_copy_uapi_to_hw_state(new_plane_state,
					  new_primary_crtc_plane_state,
					  crtc);

	/* Visibility is recomputed by the check below, if it runs at all. */
	new_plane_state->uapi.visible = false;
	if (!new_crtc_state)
		return 0;

	return intel_plane_atomic_check_with_state(old_crtc_state,
						   new_crtc_state,
						   old_plane_state,
						   new_plane_state);
}
749 
/*
 * Pick the next plane to commit: one that is still flagged in
 * @update_mask and whose new DDB allocation does not overlap any
 * other plane's current allocation tracked in @ddb/@ddb_y. The
 * chosen plane's DDB entries are updated to their new values and the
 * plane is removed from @update_mask.
 *
 * Returns NULL once all planes have been committed.
 */
static struct intel_plane *
skl_next_plane_to_commit(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct skl_ddb_entry ddb[I915_MAX_PLANES],
			 struct skl_ddb_entry ddb_y[I915_MAX_PLANES],
			 unsigned int *update_mask)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	if (*update_mask == 0)
		return NULL;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		enum plane_id plane_id = plane->id;

		if (crtc->pipe != plane->pipe ||
		    !(*update_mask & BIT(plane_id)))
			continue;

		/* New allocation still overlaps another plane's? Try it later. */
		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb[plane_id],
						ddb, I915_MAX_PLANES, plane_id) ||
		    skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
						ddb_y, I915_MAX_PLANES, plane_id))
			continue;

		/* Commit this plane: record its new DDB allocation. */
		*update_mask &= ~BIT(plane_id);
		ddb[plane_id] = crtc_state->wm.skl.plane_ddb[plane_id];
		ddb_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];

		return plane;
	}

	/* should never happen */
	drm_WARN_ON(state->base.dev, 1);

	return NULL;
}
791 
792 void intel_plane_update_noarm(struct intel_plane *plane,
793 			      const struct intel_crtc_state *crtc_state,
794 			      const struct intel_plane_state *plane_state)
795 {
796 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
797 
798 	trace_intel_plane_update_noarm(plane, crtc);
799 
800 	if (plane->update_noarm)
801 		plane->update_noarm(plane, crtc_state, plane_state);
802 }
803 
804 void intel_plane_update_arm(struct intel_plane *plane,
805 			    const struct intel_crtc_state *crtc_state,
806 			    const struct intel_plane_state *plane_state)
807 {
808 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
809 
810 	trace_intel_plane_update_arm(plane, crtc);
811 
812 	if (crtc_state->do_async_flip && plane->async_flip)
813 		plane->async_flip(plane, crtc_state, plane_state, true);
814 	else
815 		plane->update_arm(plane, crtc_state, plane_state);
816 }
817 
818 void intel_plane_disable_arm(struct intel_plane *plane,
819 			     const struct intel_crtc_state *crtc_state)
820 {
821 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
822 
823 	trace_intel_plane_disable_arm(plane, crtc);
824 	plane->disable_arm(plane, crtc_state);
825 }
826 
/*
 * Write the non-arming plane registers for all planes on @crtc that
 * are part of this update. Skipped entirely for async flips.
 */
void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	/* Async flips bypass the noarm stage. */
	if (new_crtc_state->do_async_flip)
		return;

	/*
	 * Since we only write non-arming registers here,
	 * the order does not matter even for skl+.
	 */
	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		/* TODO: for mailbox updates this should be skipped */
		if (new_plane_state->uapi.visible ||
		    new_plane_state->planar_slave)
			intel_plane_update_noarm(plane, new_crtc_state, new_plane_state);
	}
}
855 
/*
 * Arm plane updates on skl+. Planes are committed in an order that
 * avoids a plane's new DDB allocation overlapping another plane's
 * old one, see skl_next_plane_to_commit().
 */
static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_ddb_entry ddb[I915_MAX_PLANES];
	struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane *plane;

	/* Start from the old DDB allocations... */
	memcpy(ddb, old_crtc_state->wm.skl.plane_ddb,
	       sizeof(old_crtc_state->wm.skl.plane_ddb));
	memcpy(ddb_y, old_crtc_state->wm.skl.plane_ddb_y,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_y));

	/* ...and move each plane over to its new allocation in turn. */
	while ((plane = skl_next_plane_to_commit(state, crtc, ddb, ddb_y, &update_mask))) {
		struct intel_plane_state *new_plane_state =
			intel_atomic_get_new_plane_state(state, plane);

		/*
		 * TODO: for mailbox updates intel_plane_update_noarm()
		 * would have to be called here as well.
		 */
		if (new_plane_state->uapi.visible ||
		    new_plane_state->planar_slave)
			intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
		else
			intel_plane_disable_arm(plane, new_crtc_state);
	}
}
888 
889 static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
890 					struct intel_crtc *crtc)
891 {
892 	struct intel_crtc_state *new_crtc_state =
893 		intel_atomic_get_new_crtc_state(state, crtc);
894 	u32 update_mask = new_crtc_state->update_planes;
895 	struct intel_plane_state *new_plane_state;
896 	struct intel_plane *plane;
897 	int i;
898 
899 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
900 		if (crtc->pipe != plane->pipe ||
901 		    !(update_mask & BIT(plane->id)))
902 			continue;
903 
904 		/*
905 		 * TODO: for mailbox updates intel_plane_update_noarm()
906 		 * would have to be called here as well.
907 		 */
908 		if (new_plane_state->uapi.visible)
909 			intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
910 		else
911 			intel_plane_disable_arm(plane, new_crtc_state);
912 	}
913 }
914 
915 void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
916 				  struct intel_crtc *crtc)
917 {
918 	struct drm_i915_private *i915 = to_i915(state->base.dev);
919 
920 	if (DISPLAY_VER(i915) >= 9)
921 		skl_crtc_planes_update_arm(state, crtc);
922 	else
923 		i9xx_crtc_planes_update_arm(state, crtc);
924 }
925 
/**
 * intel_atomic_plane_check_clipping - check and clip a plane to its pipe
 * @plane_state: plane state to check and clip
 * @crtc_state: state of the pipe the plane sits on
 * @min_scale: minimum allowed scaling factor (as per drm_rect_calc_hscale())
 * @max_scale: maximum allowed scaling factor (as per drm_rect_calc_hscale())
 * @can_position: whether the plane may cover only a part of the pipe
 *
 * Validate the plane's scaling factors and clip its src/dst rectangles
 * against the pipe source area, updating @plane_state->uapi.visible and
 * translating dst into pipe-relative coordinates.
 *
 * Returns: 0 on success (including the invisible/no-fb case), -ERANGE on
 * invalid scaling, -EINVAL if a non-positionable plane doesn't cover the
 * whole pipe.
 */
int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
				      struct intel_crtc_state *crtc_state,
				      int min_scale, int max_scale,
				      bool can_position)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_rect *src = &plane_state->uapi.src;
	struct drm_rect *dst = &plane_state->uapi.dst;
	const struct drm_rect *clip = &crtc_state->pipe_src;
	unsigned int rotation = plane_state->hw.rotation;
	int hscale, vscale;

	/* No fb bound -> nothing to scan out on this plane. */
	if (!fb) {
		plane_state->uapi.visible = false;
		return 0;
	}

	/*
	 * Rotate src so the scale/clip math below operates in the same
	 * orientation as dst; undone by drm_rect_rotate_inv() further down.
	 */
	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);

	/* Check scaling */
	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
	if (hscale < 0 || vscale < 0) {
		drm_dbg_kms(&i915->drm, "Invalid scaling of plane\n");
		drm_rect_debug_print("src: ", src, true);
		drm_rect_debug_print("dst: ", dst, false);
		return -ERANGE;
	}

	/*
	 * FIXME: This might need further adjustment for seamless scaling
	 * with phase information, for the 2p2 and 2p1 scenarios.
	 */
	plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, clip);

	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);

	if (!can_position && plane_state->uapi.visible &&
	    !drm_rect_equals(dst, clip)) {
		drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n");
		drm_rect_debug_print("dst: ", dst, false);
		drm_rect_debug_print("clip: ", clip, false);
		return -EINVAL;
	}

	/* final plane coordinates will be relative to the plane's pipe */
	drm_rect_translate(dst, -clip->x1, -clip->y1);

	return 0;
}
977 
978 int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
979 {
980 	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
981 	const struct drm_framebuffer *fb = plane_state->hw.fb;
982 	struct drm_rect *src = &plane_state->uapi.src;
983 	u32 src_x, src_y, src_w, src_h, hsub, vsub;
984 	bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
985 
986 	/*
987 	 * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
988 	 * abuses hsub/vsub so we can't use them here. But as they
989 	 * are limited to 32bpp RGB formats we don't actually need
990 	 * to check anything.
991 	 */
992 	if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
993 	    fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
994 		return 0;
995 
996 	/*
997 	 * Hardware doesn't handle subpixel coordinates.
998 	 * Adjust to (macro)pixel boundary, but be careful not to
999 	 * increase the source viewport size, because that could
1000 	 * push the downscaling factor out of bounds.
1001 	 */
1002 	src_x = src->x1 >> 16;
1003 	src_w = drm_rect_width(src) >> 16;
1004 	src_y = src->y1 >> 16;
1005 	src_h = drm_rect_height(src) >> 16;
1006 
1007 	drm_rect_init(src, src_x << 16, src_y << 16,
1008 		      src_w << 16, src_h << 16);
1009 
1010 	if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
1011 		hsub = 2;
1012 		vsub = 2;
1013 	} else if (DISPLAY_VER(i915) >= 20 &&
1014 		   intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
1015 		/*
1016 		 * This allows NV12 and P0xx formats to have odd size and/or odd
1017 		 * source coordinates on DISPLAY_VER(i915) >= 20
1018 		 */
1019 		hsub = 1;
1020 		vsub = 1;
1021 	} else {
1022 		hsub = fb->format->hsub;
1023 		vsub = fb->format->vsub;
1024 	}
1025 
1026 	if (rotated)
1027 		hsub = vsub = max(hsub, vsub);
1028 
1029 	if (src_x % hsub || src_w % hsub) {
1030 		drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
1031 			    src_x, src_w, hsub, str_yes_no(rotated));
1032 		return -EINVAL;
1033 	}
1034 
1035 	if (src_y % vsub || src_h % vsub) {
1036 		drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
1037 			    src_y, src_h, vsub, str_yes_no(rotated));
1038 		return -EINVAL;
1039 	}
1040 
1041 	return 0;
1042 }
1043 
/*
 * Combine the relevant implicit fences of @resv with the plane's
 * pre-existing wait fence, so that the plane update waits for both.
 *
 * On success, ownership of the combined fence is stored in
 * @new_plane_state->fence (the previous reference there is dropped).
 * Returns 0 on success, negative error code on failure.
 */
static int add_dma_resv_fences(struct dma_resv *resv,
			       struct drm_plane_state *new_plane_state)
{
	/* Local reference; released on the error path below. */
	struct dma_fence *fence = dma_fence_get(new_plane_state->fence);
	struct dma_fence *new;
	int ret;

	/* Collapse the resv's fences into a single fence (may be NULL). */
	ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new);
	if (ret)
		goto error;

	if (new && fence) {
		/* Must wait for both: chain them together. */
		struct dma_fence_chain *chain = dma_fence_chain_alloc();

		if (!chain) {
			ret = -ENOMEM;
			goto error;
		}

		/* The chain takes over both fence references. */
		dma_fence_chain_init(chain, fence, new, 1);
		fence = &chain->base;

	} else if (new) {
		/* No pre-existing fence: wait on the resv fence alone. */
		fence = new;
	}

	dma_fence_put(new_plane_state->fence);
	new_plane_state->fence = fence;
	return 0;

error:
	dma_fence_put(fence);
	return ret;
}
1078 
1079 /**
1080  * intel_prepare_plane_fb - Prepare fb for usage on plane
1081  * @_plane: drm plane to prepare for
1082  * @_new_plane_state: the plane state being prepared
1083  *
1084  * Prepares a framebuffer for usage on a display plane.  Generally this
1085  * involves pinning the underlying object and updating the frontbuffer tracking
1086  * bits.  Some older platforms need special physical address handling for
1087  * cursor planes.
1088  *
1089  * Returns 0 on success, negative error code on failure.
1090  */
1091 static int
1092 intel_prepare_plane_fb(struct drm_plane *_plane,
1093 		       struct drm_plane_state *_new_plane_state)
1094 {
1095 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
1096 	struct intel_plane *plane = to_intel_plane(_plane);
1097 	struct intel_plane_state *new_plane_state =
1098 		to_intel_plane_state(_new_plane_state);
1099 	struct intel_atomic_state *state =
1100 		to_intel_atomic_state(new_plane_state->uapi.state);
1101 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1102 	struct intel_plane_state *old_plane_state =
1103 		intel_atomic_get_old_plane_state(state, plane);
1104 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
1105 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
1106 	int ret;
1107 
1108 	if (old_obj) {
1109 		const struct intel_crtc_state *new_crtc_state =
1110 			intel_atomic_get_new_crtc_state(state,
1111 							to_intel_crtc(old_plane_state->hw.crtc));
1112 
1113 		/* Big Hammer, we also need to ensure that any pending
1114 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1115 		 * current scanout is retired before unpinning the old
1116 		 * framebuffer. Note that we rely on userspace rendering
1117 		 * into the buffer attached to the pipe they are waiting
1118 		 * on. If not, userspace generates a GPU hang with IPEHR
1119 		 * point to the MI_WAIT_FOR_EVENT.
1120 		 *
1121 		 * This should only fail upon a hung GPU, in which case we
1122 		 * can safely continue.
1123 		 */
1124 		if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
1125 			ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv,
1126 						  &new_plane_state->uapi);
1127 			if (ret < 0)
1128 				return ret;
1129 		}
1130 	}
1131 
1132 	if (!obj)
1133 		return 0;
1134 
1135 	ret = intel_plane_pin_fb(new_plane_state);
1136 	if (ret)
1137 		return ret;
1138 
1139 	ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi);
1140 	if (ret < 0)
1141 		goto unpin_fb;
1142 
1143 	if (new_plane_state->uapi.fence) {
1144 		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
1145 					     &attr);
1146 
1147 		intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
1148 						     new_plane_state->uapi.fence);
1149 	}
1150 
1151 	/*
1152 	 * We declare pageflips to be interactive and so merit a small bias
1153 	 * towards upclocking to deliver the frame on time. By only changing
1154 	 * the RPS thresholds to sample more regularly and aim for higher
1155 	 * clocks we can hopefully deliver low power workloads (like kodi)
1156 	 * that are not quite steady state without resorting to forcing
1157 	 * maximum clocks following a vblank miss (see do_rps_boost()).
1158 	 */
1159 	intel_display_rps_mark_interactive(dev_priv, state, true);
1160 
1161 	return 0;
1162 
1163 unpin_fb:
1164 	intel_plane_unpin_fb(new_plane_state);
1165 
1166 	return ret;
1167 }
1168 
1169 /**
1170  * intel_cleanup_plane_fb - Cleans up an fb after plane use
1171  * @plane: drm plane to clean up for
1172  * @_old_plane_state: the state from the previous modeset
1173  *
1174  * Cleans up a framebuffer that has just been removed from a plane.
1175  */
1176 static void
1177 intel_cleanup_plane_fb(struct drm_plane *plane,
1178 		       struct drm_plane_state *_old_plane_state)
1179 {
1180 	struct intel_plane_state *old_plane_state =
1181 		to_intel_plane_state(_old_plane_state);
1182 	struct intel_atomic_state *state =
1183 		to_intel_atomic_state(old_plane_state->uapi.state);
1184 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
1185 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
1186 
1187 	if (!obj)
1188 		return;
1189 
1190 	intel_display_rps_mark_interactive(dev_priv, state, false);
1191 
1192 	/* Should only be called after a successful intel_prepare_plane_fb()! */
1193 	intel_plane_unpin_fb(old_plane_state);
1194 }
1195 
/* Plane helper vfuncs invoked by the atomic helpers around commits. */
static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
	.prepare_fb = intel_prepare_plane_fb,
	.cleanup_fb = intel_cleanup_plane_fb,
};
1200 
/**
 * intel_plane_helper_add - attach the i915 plane helper vfuncs
 * @plane: plane to attach the helpers to
 *
 * Registers intel_plane_helper_funcs (fb prepare/cleanup) with the
 * DRM atomic helper machinery for @plane.
 */
void intel_plane_helper_add(struct intel_plane *plane)
{
	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
}
1205