xref: /linux/drivers/gpu/drm/i915/display/intel_atomic_plane.c (revision e7d759f31ca295d589f7420719c311870bb3166f)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /**
25  * DOC: atomic plane helpers
26  *
27  * The functions here are used by the atomic plane helper functions to
28  * implement legacy plane updates (i.e., drm_plane->update_plane() and
29  * drm_plane->disable_plane()).  This allows plane updates to use the
30  * atomic state infrastructure and perform plane updates as separate
31  * prepare/check/commit/cleanup steps.
32  */
33 
34 #include <linux/dma-fence-chain.h>
35 
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_gem_atomic_helper.h>
38 #include <drm/drm_blend.h>
39 #include <drm/drm_fourcc.h>
40 
41 #include "i915_config.h"
42 #include "i915_reg.h"
43 #include "intel_atomic_plane.h"
44 #include "intel_cdclk.h"
45 #include "intel_display_rps.h"
46 #include "intel_display_trace.h"
47 #include "intel_display_types.h"
48 #include "intel_fb.h"
49 #include "intel_fb_pin.h"
50 #include "skl_scaler.h"
51 #include "skl_watermark.h"
52 
/* Zero @plane_state and (re)initialize it to sane defaults for @plane. */
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
				    struct intel_plane *plane)
{
	memset(plane_state, 0, sizeof(*plane_state));

	/* Reset the common drm uapi state and bind it to the plane. */
	__drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base);

	/* -1 means no scaler is assigned to this plane. */
	plane_state->scaler_id = -1;
}
62 
63 struct intel_plane *intel_plane_alloc(void)
64 {
65 	struct intel_plane_state *plane_state;
66 	struct intel_plane *plane;
67 
68 	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
69 	if (!plane)
70 		return ERR_PTR(-ENOMEM);
71 
72 	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
73 	if (!plane_state) {
74 		kfree(plane);
75 		return ERR_PTR(-ENOMEM);
76 	}
77 
78 	intel_plane_state_reset(plane_state, plane);
79 
80 	plane->base.state = &plane_state->uapi;
81 
82 	return plane;
83 }
84 
/* Free a plane allocated with intel_plane_alloc(), including its state. */
void intel_plane_free(struct intel_plane *plane)
{
	intel_plane_destroy_state(&plane->base, plane->base.state);
	kfree(plane);
}
90 
91 /**
92  * intel_plane_duplicate_state - duplicate plane state
93  * @plane: drm plane
94  *
95  * Allocates and returns a copy of the plane state (both common and
96  * Intel-specific) for the specified plane.
97  *
98  * Returns: The newly allocated plane state, or NULL on failure.
99  */
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
	struct intel_plane_state *intel_state;

	/* Duplicate the full Intel state, then fix up what must not be shared. */
	intel_state = to_intel_plane_state(plane->state);
	intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL);

	if (!intel_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);

	/*
	 * The vma references and flags belong to the original state and
	 * must not be carried over into the duplicate.
	 */
	intel_state->ggtt_vma = NULL;
	intel_state->dpt_vma = NULL;
	intel_state->flags = 0;

	/* add reference to fb */
	if (intel_state->hw.fb)
		drm_framebuffer_get(intel_state->hw.fb);

	return &intel_state->uapi;
}
123 
124 /**
125  * intel_plane_destroy_state - destroy plane state
126  * @plane: drm plane
127  * @state: state object to destroy
128  *
129  * Destroys the plane state (both common and Intel-specific) for the
130  * specified plane.
131  */
void
intel_plane_destroy_state(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct intel_plane_state *plane_state = to_intel_plane_state(state);

	/* The vmas must have been released before the state is destroyed. */
	drm_WARN_ON(plane->dev, plane_state->ggtt_vma);
	drm_WARN_ON(plane->dev, plane_state->dpt_vma);

	__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
	/* Drop the fb reference taken when the hw state was populated. */
	if (plane_state->hw.fb)
		drm_framebuffer_put(plane_state->hw.fb);
	kfree(plane_state);
}
146 
/*
 * Scale @rate by the src:dst area ratio. Upscaling leaves the rate
 * unchanged (dst is clamped to src), while downscaling increases it.
 * src coordinates are in 16.16 fixed point, dst in whole pixels.
 */
unsigned int intel_adjusted_rate(const struct drm_rect *src,
				 const struct drm_rect *dst,
				 unsigned int rate)
{
	unsigned int src_w = drm_rect_width(src) >> 16;
	unsigned int src_h = drm_rect_height(src) >> 16;
	/* Downscaling limits the maximum pixel rate */
	unsigned int dst_w = min(src_w, (unsigned int)drm_rect_width(dst));
	unsigned int dst_h = min(src_h, (unsigned int)drm_rect_height(dst));

	return DIV_ROUND_UP_ULL(mul_u32_u32(rate, src_w * src_h),
				dst_w * dst_h);
}
165 
/*
 * Return the plane's pixel rate: the crtc pixel rate adjusted for any
 * plane downscaling (see intel_adjusted_rate()).
 */
unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *plane_state)
{
	/*
	 * Note we don't check for plane visibility here as
	 * we want to use this when calculating the cursor
	 * watermarks even if the cursor is fully offscreen.
	 * That depends on the src/dst rectangles being
	 * correctly populated whenever the watermark code
	 * considers the cursor to be visible, whether or not
	 * it is actually visible.
	 *
	 * See: intel_wm_plane_visible() and intel_check_cursor()
	 */

	return intel_adjusted_rate(&plane_state->uapi.src,
				   &plane_state->uapi.dst,
				   crtc_state->pixel_rate);
}
185 
186 unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
187 				   const struct intel_plane_state *plane_state,
188 				   int color_plane)
189 {
190 	const struct drm_framebuffer *fb = plane_state->hw.fb;
191 
192 	if (!plane_state->uapi.visible)
193 		return 0;
194 
195 	return intel_plane_pixel_rate(crtc_state, plane_state) *
196 		fb->format->cpp[color_plane];
197 }
198 
199 static bool
200 use_min_ddb(const struct intel_crtc_state *crtc_state,
201 	    struct intel_plane *plane)
202 {
203 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
204 
205 	return DISPLAY_VER(i915) >= 13 &&
206 	       crtc_state->uapi.async_flip &&
207 	       plane->async_flip;
208 }
209 
/*
 * Return the plane's relative data rate for @color_plane, used to
 * apportion DDB between planes. Unlike intel_plane_data_rate() this is
 * based on the source size only (except for scaling adjustment below),
 * not the crtc pixel rate.
 */
static unsigned int
intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state,
			       int color_plane)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int width, height;
	unsigned int rel_data_rate;

	if (!plane_state->uapi.visible)
		return 0;

	/*
	 * Extra ddb is allocated based on the plane's share of the total
	 * data rate. In some cases (see use_min_ddb()) the plane should
	 * not get any extra ddb, so don't count its data rate at all.
	 */
	if (use_min_ddb(crtc_state, plane))
		return 0;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;
	height = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* UV plane does 1/2 pixel sub-sampling */
	if (color_plane == 1) {
		width /= 2;
		height /= 2;
	}

	rel_data_rate = width * height * fb->format->cpp[color_plane];

	/* The cursor is never scaled, so no downscaling adjustment needed. */
	if (plane->id == PLANE_CURSOR)
		return rel_data_rate;

	return intel_adjusted_rate(&plane_state->uapi.src,
				   &plane_state->uapi.dst,
				   rel_data_rate);
}
254 
/*
 * Compute the min cdclk this plane requires and set *@need_cdclk_calc
 * when that requirement exceeds what the current cdclk state provides,
 * so the caller knows to recompute the cdclk state.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
			       struct intel_plane *plane,
			       bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
	const struct intel_cdclk_state *cdclk_state;
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;

	/* Invisible planes, or planes without a min_cdclk hook, don't constrain cdclk. */
	if (!plane_state->uapi.visible || !plane->min_cdclk)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	new_crtc_state->min_cdclk[plane->id] =
		plane->min_cdclk(new_crtc_state, plane_state);

	/*
	 * No need to check against the cdclk state if
	 * the min cdclk for the plane doesn't increase.
	 *
	 * Ie. we only ever increase the cdclk due to plane
	 * requirements. This can reduce back and forth
	 * display blinking due to constant cdclk changes.
	 */
	if (new_crtc_state->min_cdclk[plane->id] <=
	    old_crtc_state->min_cdclk[plane->id])
		return 0;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	/*
	 * No need to recalculate the cdclk state if
	 * the min cdclk for the pipe doesn't increase.
	 *
	 * Ie. we only ever increase the cdclk due to plane
	 * requirements. This can reduce back and forth
	 * display blinking due to constant cdclk changes.
	 */
	if (new_crtc_state->min_cdclk[plane->id] <=
	    cdclk_state->min_cdclk[crtc->pipe])
		return 0;

	drm_dbg_kms(&dev_priv->drm,
		    "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
		    plane->base.base.id, plane->base.name,
		    new_crtc_state->min_cdclk[plane->id],
		    crtc->base.base.id, crtc->base.name,
		    cdclk_state->min_cdclk[crtc->pipe]);
	*need_cdclk_calc = true;

	return 0;
}
314 
/* Drop the hw state's fb reference (if any) and zero the whole hw state. */
static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
{
	if (plane_state->hw.fb)
		drm_framebuffer_put(plane_state->hw.fb);

	memset(&plane_state->hw, 0, sizeof(plane_state->hw));
}
322 
/*
 * Populate @plane_state's hw state from @from_plane_state's uapi state,
 * rewriting hw.crtc to point at @crtc (which may differ from uapi.crtc
 * in the bigjoiner case, see below).
 */
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
				       const struct intel_plane_state *from_plane_state,
				       struct intel_crtc *crtc)
{
	intel_plane_clear_hw_state(plane_state);

	/*
	 * For the bigjoiner slave uapi.crtc will point at
	 * the master crtc. So we explicitly assign the right
	 * slave crtc to hw.crtc. uapi.crtc!=NULL simply indicates
	 * the plane is logically enabled on the uapi level.
	 */
	plane_state->hw.crtc = from_plane_state->uapi.crtc ? &crtc->base : NULL;

	/* The hw state holds its own reference to the framebuffer. */
	plane_state->hw.fb = from_plane_state->uapi.fb;
	if (plane_state->hw.fb)
		drm_framebuffer_get(plane_state->hw.fb);

	plane_state->hw.alpha = from_plane_state->uapi.alpha;
	plane_state->hw.pixel_blend_mode =
		from_plane_state->uapi.pixel_blend_mode;
	plane_state->hw.rotation = from_plane_state->uapi.rotation;
	plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
	plane_state->hw.color_range = from_plane_state->uapi.color_range;
	plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter;

	/* src/dst rects are carried over in the uapi state itself. */
	plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
}
352 
353 void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
354 			       const struct intel_plane_state *from_plane_state)
355 {
356 	intel_plane_clear_hw_state(plane_state);
357 
358 	memcpy(&plane_state->hw, &from_plane_state->hw,
359 	       sizeof(plane_state->hw));
360 
361 	if (plane_state->hw.fb)
362 		drm_framebuffer_get(plane_state->hw.fb);
363 }
364 
365 void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
366 			       struct intel_plane_state *plane_state)
367 {
368 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
369 
370 	crtc_state->active_planes &= ~BIT(plane->id);
371 	crtc_state->scaled_planes &= ~BIT(plane->id);
372 	crtc_state->nv12_planes &= ~BIT(plane->id);
373 	crtc_state->c8_planes &= ~BIT(plane->id);
374 	crtc_state->async_flip_planes &= ~BIT(plane->id);
375 	crtc_state->data_rate[plane->id] = 0;
376 	crtc_state->data_rate_y[plane->id] = 0;
377 	crtc_state->rel_data_rate[plane->id] = 0;
378 	crtc_state->rel_data_rate_y[plane->id] = 0;
379 	crtc_state->min_cdclk[plane->id] = 0;
380 
381 	plane_state->uapi.visible = false;
382 }
383 
384 /* FIXME nuke when all wm code is atomic */
385 static bool intel_wm_need_update(const struct intel_plane_state *cur,
386 				 struct intel_plane_state *new)
387 {
388 	/* Update watermarks on tiling or size changes. */
389 	if (new->uapi.visible != cur->uapi.visible)
390 		return true;
391 
392 	if (!cur->hw.fb || !new->hw.fb)
393 		return false;
394 
395 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
396 	    cur->hw.rotation != new->hw.rotation ||
397 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
398 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
399 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
400 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
401 		return true;
402 
403 	return false;
404 }
405 
406 static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state)
407 {
408 	int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
409 	int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
410 	int dst_w = drm_rect_width(&plane_state->uapi.dst);
411 	int dst_h = drm_rect_height(&plane_state->uapi.dst);
412 
413 	return src_w != dst_w || src_h != dst_h;
414 }
415 
416 static bool intel_plane_do_async_flip(struct intel_plane *plane,
417 				      const struct intel_crtc_state *old_crtc_state,
418 				      const struct intel_crtc_state *new_crtc_state)
419 {
420 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
421 
422 	if (!plane->async_flip)
423 		return false;
424 
425 	if (!new_crtc_state->uapi.async_flip)
426 		return false;
427 
428 	/*
429 	 * In platforms after DISPLAY13, we might need to override
430 	 * first async flip in order to change watermark levels
431 	 * as part of optimization.
432 	 * So for those, we are checking if this is a first async flip.
433 	 * For platforms earlier than DISPLAY13 we always do async flip.
434 	 */
435 	return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
436 }
437 
/*
 * Whether CxSR (self refresh) must be disabled around this plane update
 * on GMCH platforms: around any plane enable/disable, and around most
 * plane control register changes (see below for the exception).
 */
static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state,
				   const struct intel_plane_state *old_plane_state,
				   const struct intel_plane_state *new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	bool old_visible = old_plane_state->uapi.visible;
	bool new_visible = new_plane_state->uapi.visible;
	u32 old_ctl = old_plane_state->ctl;
	u32 new_ctl = new_plane_state->ctl;
	bool modeset, turn_on, turn_off;

	/* Cursor updates don't require CxSR to be disabled. */
	if (plane->id == PLANE_CURSOR)
		return false;

	modeset = intel_crtc_needs_modeset(new_crtc_state);
	turn_off = old_visible && (!new_visible || modeset);
	turn_on = new_visible && (!old_visible || modeset);

	/* Must disable CxSR around plane enable/disable */
	if (turn_on || turn_off)
		return true;

	/* Plane stays off (or stays on without a ctl change check below). */
	if (!old_visible || !new_visible)
		return false;

	/*
	 * Most plane control register updates are blocked while in CxSR.
	 *
	 * Tiling mode is one exception where the primary plane can
	 * apparently handle it, whereas the sprites can not (the
	 * sprite issue being only relevant on VLV/CHV where CxSR
	 * is actually possible with a sprite enabled).
	 */
	if (plane->id == PLANE_PRIMARY) {
		old_ctl &= ~DISP_TILED;
		new_ctl &= ~DISP_TILED;
	}

	return old_ctl != new_ctl;
}
478 
/*
 * Work out the crtc-level consequences of this plane update: scaler
 * assignment, watermark update flags, frontbuffer bits, CxSR/LP
 * watermark disabling, and whether to take the async flip path.
 *
 * Returns 0 on success or a negative error code (from scaler setup).
 */
static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
					   struct intel_crtc_state *new_crtc_state,
					   const struct intel_plane_state *old_plane_state,
					   struct intel_plane_state *new_plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = new_crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* skl+ planes (other than the cursor) may need a pipe scaler. */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = new_plane_state->uapi.visible;

	/* A plane can't have been visible on an inactive crtc. */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(new_crtc_state, new_plane_state);
		visible = false;
	}

	/* Invisible before and after: nothing to do. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	/* Pre-ilk (minus g4x) watermarks are updated outside the atomic wm code. */
	if (turn_on) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_pre = true;
	} else if (turn_off) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_post = true;
	} else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			new_crtc_state->update_wm_pre = true;
			new_crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		new_crtc_state->fb_bits |= plane->frontbuffer_bit;

	if (HAS_GMCH(dev_priv) &&
	    i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
		new_crtc_state->disable_cxsr = true;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!intel_plane_is_scaled(old_plane_state) &&
			 intel_plane_is_scaled(new_plane_state))))
		new_crtc_state->disable_lp_wm = true;

	if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) {
		new_crtc_state->do_async_flip = true;
		new_crtc_state->async_flip_planes |= BIT(plane->id);
	}

	return 0;
}
601 
/*
 * Check a plane update against the given old/new crtc states: run the
 * plane's check_plane() hook, then update the crtc's plane bitmasks and
 * data rate bookkeeping based on the resulting visibility.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state,
					const struct intel_plane_state *old_plane_state,
					struct intel_plane_state *new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	const struct drm_framebuffer *fb = new_plane_state->hw.fb;
	int ret;

	/* Start from a clean slate; the checks below re-add the bits. */
	intel_plane_set_invisible(new_crtc_state, new_plane_state);
	new_crtc_state->enabled_planes &= ~BIT(plane->id);

	/* Plane disabled before and after: nothing to check. */
	if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
		return 0;

	/* check_plane() computes visibility, among other things. */
	ret = plane->check_plane(new_crtc_state, new_plane_state);
	if (ret)
		return ret;

	if (fb)
		new_crtc_state->enabled_planes |= BIT(plane->id);

	/* FIXME pre-g4x don't work like this */
	if (new_plane_state->uapi.visible)
		new_crtc_state->active_planes |= BIT(plane->id);

	if (new_plane_state->uapi.visible &&
	    intel_plane_is_scaled(new_plane_state))
		new_crtc_state->scaled_planes |= BIT(plane->id);

	/* NOTE: the fb derefs below rely on visible implying hw.fb != NULL. */
	if (new_plane_state->uapi.visible &&
	    intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		new_crtc_state->nv12_planes |= BIT(plane->id);

	if (new_plane_state->uapi.visible &&
	    fb->format->format == DRM_FORMAT_C8)
		new_crtc_state->c8_planes |= BIT(plane->id);

	if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
		new_crtc_state->update_planes |= BIT(plane->id);

	/*
	 * For semiplanar YUV the Y plane's rates go in the _y slots and
	 * the UV plane's in the normal slots; otherwise only the single
	 * color plane's rates are tracked.
	 */
	if (new_plane_state->uapi.visible &&
	    intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
		new_crtc_state->data_rate_y[plane->id] =
			intel_plane_data_rate(new_crtc_state, new_plane_state, 0);
		new_crtc_state->data_rate[plane->id] =
			intel_plane_data_rate(new_crtc_state, new_plane_state, 1);

		new_crtc_state->rel_data_rate_y[plane->id] =
			intel_plane_relative_data_rate(new_crtc_state,
						       new_plane_state, 0);
		new_crtc_state->rel_data_rate[plane->id] =
			intel_plane_relative_data_rate(new_crtc_state,
						       new_plane_state, 1);
	} else if (new_plane_state->uapi.visible) {
		new_crtc_state->data_rate[plane->id] =
			intel_plane_data_rate(new_crtc_state, new_plane_state, 0);

		new_crtc_state->rel_data_rate[plane->id] =
			intel_plane_relative_data_rate(new_crtc_state,
						       new_plane_state, 0);
	}

	return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
					       old_plane_state, new_plane_state);
}
668 
/* Look up the plane with @plane_id on @crtc, or NULL if there is none. */
static struct intel_plane *
intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
		if (plane->id == plane_id)
			return plane;
	}

	return NULL;
}
682 
/*
 * Top-level atomic check for a plane: derive the hw state from the uapi
 * state (or from the master plane's state for a bigjoiner slave) and
 * run the full per-plane checks against the crtc states.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_plane_atomic_check(struct intel_atomic_state *state,
			     struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	const struct intel_plane_state *new_master_plane_state;
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/*
	 * A bigjoiner slave plane mirrors the corresponding plane on the
	 * master crtc, so the hw state is copied from the master plane.
	 */
	if (new_crtc_state && intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
		struct intel_crtc *master_crtc =
			intel_master_crtc(new_crtc_state);
		struct intel_plane *master_plane =
			intel_crtc_get_plane(master_crtc, plane->id);

		new_master_plane_state =
			intel_atomic_get_new_plane_state(state, master_plane);
	} else {
		new_master_plane_state = new_plane_state;
	}

	intel_plane_copy_uapi_to_hw_state(new_plane_state,
					  new_master_plane_state,
					  crtc);

	/* Visibility is recomputed below; without a crtc state it stays false. */
	new_plane_state->uapi.visible = false;
	if (!new_crtc_state)
		return 0;

	return intel_plane_atomic_check_with_state(old_crtc_state,
						   new_crtc_state,
						   old_plane_state,
						   new_plane_state);
}
723 
/*
 * Pick the next plane of @crtc that can be committed: one still in
 * *@update_mask whose new DDB allocation doesn't overlap the DDB
 * (@ddb/@ddb_y, updated as planes are committed) still occupied by
 * not-yet-committed planes. This yields a safe commit order for skl+
 * where planes must not transiently overlap in DDB.
 *
 * Returns the plane, or NULL once *@update_mask is empty.
 */
static struct intel_plane *
skl_next_plane_to_commit(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct skl_ddb_entry ddb[I915_MAX_PLANES],
			 struct skl_ddb_entry ddb_y[I915_MAX_PLANES],
			 unsigned int *update_mask)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	if (*update_mask == 0)
		return NULL;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		enum plane_id plane_id = plane->id;

		if (crtc->pipe != plane->pipe ||
		    !(*update_mask & BIT(plane_id)))
			continue;

		/* Skip planes whose new DDB still overlaps uncommitted planes. */
		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb[plane_id],
						ddb, I915_MAX_PLANES, plane_id) ||
		    skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
						ddb_y, I915_MAX_PLANES, plane_id))
			continue;

		/* Commit this plane: record its new DDB as its occupied DDB. */
		*update_mask &= ~BIT(plane_id);
		ddb[plane_id] = crtc_state->wm.skl.plane_ddb[plane_id];
		ddb_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];

		return plane;
	}

	/* should never happen */
	drm_WARN_ON(state->base.dev, 1);

	return NULL;
}
765 
/*
 * Write the plane's non-arming (not self-latching) registers, if the
 * plane implements the update_noarm hook.
 */
void intel_plane_update_noarm(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	trace_intel_plane_update_noarm(plane, crtc);

	if (plane->update_noarm)
		plane->update_noarm(plane, crtc_state, plane_state);
}
777 
/*
 * Write the plane's arming registers (latching the update), taking the
 * async flip path when this commit is an async flip the plane supports.
 */
void intel_plane_update_arm(struct intel_plane *plane,
			    const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	trace_intel_plane_update_arm(plane, crtc);

	if (crtc_state->do_async_flip && plane->async_flip)
		plane->async_flip(plane, crtc_state, plane_state, true);
	else
		plane->update_arm(plane, crtc_state, plane_state);
}
791 
/* Disable the plane by writing its arming registers. */
void intel_plane_disable_arm(struct intel_plane *plane,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	trace_intel_plane_disable_arm(plane, crtc);
	plane->disable_arm(plane, crtc_state);
}
800 
/*
 * Run the noarm update phase for all planes of @crtc that are being
 * updated. Skipped entirely for async flips (which only touch arming
 * registers).
 */
void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	if (new_crtc_state->do_async_flip)
		return;

	/*
	 * Since we only write non-arming registers here,
	 * the order does not matter even for skl+.
	 */
	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		/* TODO: for mailbox updates this should be skipped */
		if (new_plane_state->uapi.visible ||
		    new_plane_state->planar_slave)
			intel_plane_update_noarm(plane, new_crtc_state, new_plane_state);
	}
}
829 
/*
 * Arm all updated planes of @crtc on skl+, committing them in an order
 * chosen by skl_next_plane_to_commit() so that plane DDB allocations
 * never transiently overlap.
 */
static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_ddb_entry ddb[I915_MAX_PLANES];
	struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane *plane;

	/* Start from the old DDB layout; entries are updated as planes commit. */
	memcpy(ddb, old_crtc_state->wm.skl.plane_ddb,
	       sizeof(old_crtc_state->wm.skl.plane_ddb));
	memcpy(ddb_y, old_crtc_state->wm.skl.plane_ddb_y,
	       sizeof(old_crtc_state->wm.skl.plane_ddb_y));

	while ((plane = skl_next_plane_to_commit(state, crtc, ddb, ddb_y, &update_mask))) {
		struct intel_plane_state *new_plane_state =
			intel_atomic_get_new_plane_state(state, plane);

		/*
		 * TODO: for mailbox updates intel_plane_update_noarm()
		 * would have to be called here as well.
		 */
		if (new_plane_state->uapi.visible ||
		    new_plane_state->planar_slave)
			intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
		else
			intel_plane_disable_arm(plane, new_crtc_state);
	}
}
862 
/*
 * Arm all updated planes of @crtc on pre-skl platforms; no DDB
 * constraints here, so the commit order does not matter.
 */
static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		/*
		 * TODO: for mailbox updates intel_plane_update_noarm()
		 * would have to be called here as well.
		 */
		if (new_plane_state->uapi.visible)
			intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
		else
			intel_plane_disable_arm(plane, new_crtc_state);
	}
}
888 
/* Arm all updated planes of @crtc, dispatching on display version. */
void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (DISPLAY_VER(i915) >= 9)
		skl_crtc_planes_update_arm(state, crtc);
	else
		i9xx_crtc_planes_update_arm(state, crtc);
}
899 
/**
 * intel_atomic_plane_check_clipping - validate scaling and clip the plane to the pipe
 * @plane_state: plane state to check; src/dst/visible are updated in place
 * @crtc_state: crtc state providing the pipe source rectangle to clip against
 * @min_scale: minimum allowed scaling factor (as used by drm_rect_calc_hscale())
 * @max_scale: maximum allowed scaling factor (as used by drm_rect_calc_vscale())
 * @can_position: if false, a visible plane must cover the entire pipe
 *
 * Checks that the requested src->dst scaling is within [@min_scale, @max_scale],
 * clips the plane to the pipe source rectangle, and translates the dst
 * coordinates to be relative to the pipe. A plane without an fb is simply
 * marked invisible.
 *
 * Returns 0 on success, -ERANGE on invalid scaling, -EINVAL if the plane
 * must but does not cover the whole pipe.
 */
int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
				      struct intel_crtc_state *crtc_state,
				      int min_scale, int max_scale,
				      bool can_position)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_rect *src = &plane_state->uapi.src;
	struct drm_rect *dst = &plane_state->uapi.dst;
	const struct drm_rect *clip = &crtc_state->pipe_src;
	unsigned int rotation = plane_state->hw.rotation;
	int hscale, vscale;

	/* No fb means nothing to scan out. */
	if (!fb) {
		plane_state->uapi.visible = false;
		return 0;
	}

	/* Work in rotated coordinates while checking scaling/clipping. */
	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);

	/* Check scaling */
	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
	if (hscale < 0 || vscale < 0) {
		drm_dbg_kms(&i915->drm, "Invalid scaling of plane\n");
		drm_rect_debug_print("src: ", src, true);
		drm_rect_debug_print("dst: ", dst, false);
		return -ERANGE;
	}

	/*
	 * FIXME: This might need further adjustment for seamless scaling
	 * with phase information, for the 2p2 and 2p1 scenarios.
	 */
	plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, clip);

	/* Back to non-rotated coordinates for the hardware/state. */
	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);

	if (!can_position && plane_state->uapi.visible &&
	    !drm_rect_equals(dst, clip)) {
		drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n");
		drm_rect_debug_print("dst: ", dst, false);
		drm_rect_debug_print("clip: ", clip, false);
		return -EINVAL;
	}

	/* final plane coordinates will be relative to the plane's pipe */
	drm_rect_translate(dst, -clip->x1, -clip->y1);

	return 0;
}
951 
952 int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
953 {
954 	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
955 	const struct drm_framebuffer *fb = plane_state->hw.fb;
956 	struct drm_rect *src = &plane_state->uapi.src;
957 	u32 src_x, src_y, src_w, src_h, hsub, vsub;
958 	bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
959 
960 	/*
961 	 * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
962 	 * abuses hsub/vsub so we can't use them here. But as they
963 	 * are limited to 32bpp RGB formats we don't actually need
964 	 * to check anything.
965 	 */
966 	if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
967 	    fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
968 		return 0;
969 
970 	/*
971 	 * Hardware doesn't handle subpixel coordinates.
972 	 * Adjust to (macro)pixel boundary, but be careful not to
973 	 * increase the source viewport size, because that could
974 	 * push the downscaling factor out of bounds.
975 	 */
976 	src_x = src->x1 >> 16;
977 	src_w = drm_rect_width(src) >> 16;
978 	src_y = src->y1 >> 16;
979 	src_h = drm_rect_height(src) >> 16;
980 
981 	drm_rect_init(src, src_x << 16, src_y << 16,
982 		      src_w << 16, src_h << 16);
983 
984 	if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
985 		hsub = 2;
986 		vsub = 2;
987 	} else if (DISPLAY_VER(i915) >= 20 &&
988 		   intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
989 		/*
990 		 * This allows NV12 and P0xx formats to have odd size and/or odd
991 		 * source coordinates on DISPLAY_VER(i915) >= 20
992 		 */
993 		hsub = 1;
994 		vsub = 1;
995 	} else {
996 		hsub = fb->format->hsub;
997 		vsub = fb->format->vsub;
998 	}
999 
1000 	if (rotated)
1001 		hsub = vsub = max(hsub, vsub);
1002 
1003 	if (src_x % hsub || src_w % hsub) {
1004 		drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
1005 			    src_x, src_w, hsub, str_yes_no(rotated));
1006 		return -EINVAL;
1007 	}
1008 
1009 	if (src_y % vsub || src_h % vsub) {
1010 		drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
1011 			    src_y, src_h, vsub, str_yes_no(rotated));
1012 		return -EINVAL;
1013 	}
1014 
1015 	return 0;
1016 }
1017 
1018 static int add_dma_resv_fences(struct dma_resv *resv,
1019 			       struct drm_plane_state *new_plane_state)
1020 {
1021 	struct dma_fence *fence = dma_fence_get(new_plane_state->fence);
1022 	struct dma_fence *new;
1023 	int ret;
1024 
1025 	ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new);
1026 	if (ret)
1027 		goto error;
1028 
1029 	if (new && fence) {
1030 		struct dma_fence_chain *chain = dma_fence_chain_alloc();
1031 
1032 		if (!chain) {
1033 			ret = -ENOMEM;
1034 			goto error;
1035 		}
1036 
1037 		dma_fence_chain_init(chain, fence, new, 1);
1038 		fence = &chain->base;
1039 
1040 	} else if (new) {
1041 		fence = new;
1042 	}
1043 
1044 	dma_fence_put(new_plane_state->fence);
1045 	new_plane_state->fence = fence;
1046 	return 0;
1047 
1048 error:
1049 	dma_fence_put(fence);
1050 	return ret;
1051 }
1052 
1053 /**
1054  * intel_prepare_plane_fb - Prepare fb for usage on plane
1055  * @_plane: drm plane to prepare for
1056  * @_new_plane_state: the plane state being prepared
1057  *
1058  * Prepares a framebuffer for usage on a display plane.  Generally this
1059  * involves pinning the underlying object and updating the frontbuffer tracking
1060  * bits.  Some older platforms need special physical address handling for
1061  * cursor planes.
1062  *
1063  * Returns 0 on success, negative error code on failure.
1064  */
1065 static int
1066 intel_prepare_plane_fb(struct drm_plane *_plane,
1067 		       struct drm_plane_state *_new_plane_state)
1068 {
1069 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
1070 	struct intel_plane *plane = to_intel_plane(_plane);
1071 	struct intel_plane_state *new_plane_state =
1072 		to_intel_plane_state(_new_plane_state);
1073 	struct intel_atomic_state *state =
1074 		to_intel_atomic_state(new_plane_state->uapi.state);
1075 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1076 	struct intel_plane_state *old_plane_state =
1077 		intel_atomic_get_old_plane_state(state, plane);
1078 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
1079 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
1080 	int ret;
1081 
1082 	if (old_obj) {
1083 		const struct intel_crtc_state *new_crtc_state =
1084 			intel_atomic_get_new_crtc_state(state,
1085 							to_intel_crtc(old_plane_state->hw.crtc));
1086 
1087 		/* Big Hammer, we also need to ensure that any pending
1088 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1089 		 * current scanout is retired before unpinning the old
1090 		 * framebuffer. Note that we rely on userspace rendering
1091 		 * into the buffer attached to the pipe they are waiting
1092 		 * on. If not, userspace generates a GPU hang with IPEHR
1093 		 * point to the MI_WAIT_FOR_EVENT.
1094 		 *
1095 		 * This should only fail upon a hung GPU, in which case we
1096 		 * can safely continue.
1097 		 */
1098 		if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
1099 			ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv,
1100 						  &new_plane_state->uapi);
1101 			if (ret < 0)
1102 				return ret;
1103 		}
1104 	}
1105 
1106 	if (!obj)
1107 		return 0;
1108 
1109 	ret = intel_plane_pin_fb(new_plane_state);
1110 	if (ret)
1111 		return ret;
1112 
1113 	ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi);
1114 	if (ret < 0)
1115 		goto unpin_fb;
1116 
1117 	if (new_plane_state->uapi.fence) {
1118 		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
1119 					     &attr);
1120 
1121 		intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
1122 						     new_plane_state->uapi.fence);
1123 	}
1124 
1125 	/*
1126 	 * We declare pageflips to be interactive and so merit a small bias
1127 	 * towards upclocking to deliver the frame on time. By only changing
1128 	 * the RPS thresholds to sample more regularly and aim for higher
1129 	 * clocks we can hopefully deliver low power workloads (like kodi)
1130 	 * that are not quite steady state without resorting to forcing
1131 	 * maximum clocks following a vblank miss (see do_rps_boost()).
1132 	 */
1133 	intel_display_rps_mark_interactive(dev_priv, state, true);
1134 
1135 	return 0;
1136 
1137 unpin_fb:
1138 	intel_plane_unpin_fb(new_plane_state);
1139 
1140 	return ret;
1141 }
1142 
1143 /**
1144  * intel_cleanup_plane_fb - Cleans up an fb after plane use
1145  * @plane: drm plane to clean up for
1146  * @_old_plane_state: the state from the previous modeset
1147  *
1148  * Cleans up a framebuffer that has just been removed from a plane.
1149  */
1150 static void
1151 intel_cleanup_plane_fb(struct drm_plane *plane,
1152 		       struct drm_plane_state *_old_plane_state)
1153 {
1154 	struct intel_plane_state *old_plane_state =
1155 		to_intel_plane_state(_old_plane_state);
1156 	struct intel_atomic_state *state =
1157 		to_intel_atomic_state(old_plane_state->uapi.state);
1158 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
1159 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
1160 
1161 	if (!obj)
1162 		return;
1163 
1164 	intel_display_rps_mark_interactive(dev_priv, state, false);
1165 
1166 	/* Should only be called after a successful intel_prepare_plane_fb()! */
1167 	intel_plane_unpin_fb(old_plane_state);
1168 }
1169 
/* Plane helper hooks used by the atomic helpers around commits. */
static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
	.prepare_fb = intel_prepare_plane_fb,
	.cleanup_fb = intel_cleanup_plane_fb,
};
1174 
/**
 * intel_plane_helper_add - attach the i915 plane helper vtable
 * @plane: plane to attach intel_plane_helper_funcs to
 */
void intel_plane_helper_add(struct intel_plane *plane)
{
	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
}
1179