xref: /linux/drivers/gpu/drm/i915/display/intel_fbc.c (revision 305a60de9b16f310e8e50d1405108609f6e6ec2b)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /**
25  * DOC: Frame Buffer Compression (FBC)
26  *
27  * FBC tries to save memory bandwidth (and so power consumption) by
28  * compressing the amount of memory used by the display. It is total
29  * transparent to user space and completely handled in the kernel.
30  *
31  * The benefits of FBC are mostly visible with solid backgrounds and
32  * variation-less patterns. It comes from keeping the memory footprint small
33  * and having fewer memory pages opened and accessed for refreshing the display.
34  *
 35  * i915 is responsible for reserving stolen memory for FBC and configuring
 36  * its offset in the proper registers. The hardware takes care of all
37  * compress/decompress. However there are many known cases where we have to
38  * forcibly disable it to allow proper screen updates.
39  */
40 
41 #include <linux/debugfs.h>
42 #include <linux/string_helpers.h>
43 
44 #include <drm/drm_blend.h>
45 #include <drm/drm_fourcc.h>
46 #include <drm/drm_print.h>
47 
48 #include "i9xx_plane_regs.h"
49 #include "intel_de.h"
50 #include "intel_display_device.h"
51 #include "intel_display_regs.h"
52 #include "intel_display_rpm.h"
53 #include "intel_display_trace.h"
54 #include "intel_display_types.h"
55 #include "intel_display_utils.h"
56 #include "intel_display_wa.h"
57 #include "intel_fbc.h"
58 #include "intel_fbc_regs.h"
59 #include "intel_frontbuffer.h"
60 #include "intel_parent.h"
61 #include "intel_step.h"
62 
/* Iterate over all FBC instance ids present on this display (per fbc_mask) */
#define for_each_fbc_id(__display, __fbc_id) \
	for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
		for_each_if(DISPLAY_RUNTIME_INFO(__display)->fbc_mask & BIT(__fbc_id))

/* Iterate over all allocated FBC instances, skipping empty instance slots */
#define for_each_intel_fbc(__display, __fbc, __fbc_id) \
	for_each_fbc_id((__display), (__fbc_id)) \
		for_each_if((__fbc) = (__display)->fbc.instances[(__fbc_id)])

/* Sentinel id: the FBC system cache is currently not reserved by any instance */
#define FBC_SYS_CACHE_ID_NONE	I915_MAX_FBCS
72 
/*
 * Per-platform FBC hardware hooks; filled in by the gen specific
 * *_fbc_funcs tables below.
 */
struct intel_fbc_funcs {
	void (*activate)(struct intel_fbc *fbc);
	void (*deactivate)(struct intel_fbc *fbc);
	bool (*is_active)(struct intel_fbc *fbc);
	bool (*is_compressing)(struct intel_fbc *fbc);
	void (*nuke)(struct intel_fbc *fbc);
	void (*program_cfb)(struct intel_fbc *fbc);
	void (*set_false_color)(struct intel_fbc *fbc, bool enable); /* ivb+ only */
};
82 
/*
 * Software state from which the FBC registers are programmed.
 * See the comment on intel_fbc::state below.
 */
struct intel_fbc_state {
	struct intel_plane *plane;
	unsigned int cfb_stride;	/* CFB stride in bytes, assuming 1:1 compression limit */
	unsigned int cfb_size;
	unsigned int fence_y_offset;
	u16 override_cfb_stride;	/* in 64 byte units per 4 line segment, 0 = no override */
	u16 interval;
	s8 fence_id;			/* negative = no fence */
	struct drm_rect dirty_rect;
};
93 
/* One FBC instance (FBC A/B/...) */
struct intel_fbc {
	struct intel_display *display;
	const struct intel_fbc_funcs *funcs;

	/* This is always the outer lock when overlapping with stolen_lock */
	struct mutex lock;
	unsigned int busy_bits;

	struct intel_stolen_node *compressed_fb;	/* CFB allocation in stolen memory */
	struct intel_stolen_node *compressed_llb;	/* line length buffer, pre-g4x hardware only */

	enum intel_fbc_id id;

	u8 limit;	/* compression limit: 1 (1:1), 2 (1:2) or 4 (1:4) */

	bool false_color;

	bool active;	/* FBC currently enabled in hardware */
	bool activated;	/* FBC has been activated at some point */
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_state state;
	const char *no_fbc_reason;
};
128 
129 static struct intel_fbc *intel_fbc_for_pipe(struct intel_display *display, enum pipe pipe)
130 {
131 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
132 	struct intel_plane *primary = NULL;
133 
134 	primary = to_intel_plane(crtc->base.primary);
135 
136 	if (drm_WARN_ON(display->drm, !primary))
137 		return NULL;
138 
139 	return primary->fbc;
140 }
141 
142 /* plane stride in pixels */
143 static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
144 {
145 	const struct drm_framebuffer *fb = plane_state->hw.fb;
146 	unsigned int stride;
147 
148 	stride = plane_state->view.color_plane[0].mapping_stride;
149 	if (!drm_rotation_90_or_270(plane_state->hw.rotation))
150 		stride /= fb->format->cpp[0];
151 
152 	return stride;
153 }
154 
155 static unsigned int intel_fbc_cfb_cpp(const struct intel_plane_state *plane_state)
156 {
157 	const struct drm_framebuffer *fb = plane_state->hw.fb;
158 	unsigned int cpp = fb->format->cpp[0];
159 
160 	return max(cpp, 4);
161 }
162 
/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int intel_fbc_plane_cfb_stride(const struct intel_plane_state *plane_state)
{
	return intel_fbc_plane_stride(plane_state) *
		intel_fbc_cfb_cpp(plane_state);
}
170 
171 /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
172 static unsigned int skl_fbc_min_cfb_stride(struct intel_display *display,
173 					   unsigned int cpp, unsigned int width)
174 {
175 	unsigned int limit = 4; /* 1:4 compression limit is the worst case */
176 	unsigned int height = 4; /* FBC segment is 4 lines */
177 	unsigned int stride;
178 
179 	/* minimum segment stride we can use */
180 	stride = width * cpp * height / limit;
181 
182 	/*
183 	 * Wa_16011863758: icl+
184 	 * Avoid some hardware segment address miscalculation.
185 	 */
186 	if (intel_display_wa(display, INTEL_DISPLAY_WA_16011863758))
187 		stride += 64;
188 
189 	/*
190 	 * At least some of the platforms require each 4 line segment to
191 	 * be 512 byte aligned. Just do it always for simplicity.
192 	 */
193 	stride = ALIGN(stride, 512);
194 
195 	/* convert back to single line equivalent with 1:1 compression limit */
196 	return stride * limit / height;
197 }
198 
/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int _intel_fbc_cfb_stride(struct intel_display *display,
					  unsigned int cpp, unsigned int width,
					  unsigned int stride)
{
	/* pre-skl hardware needs no extra stride alignment */
	if (DISPLAY_VER(display) < 9)
		return stride;

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Aligning each line to 512 bytes guarantees
	 * that regardless of the compression limit we choose later.
	 */
	return max(ALIGN(stride, 512),
		   skl_fbc_min_cfb_stride(display, cpp, width));
}
214 
215 static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
216 {
217 	struct intel_display *display = to_intel_display(plane_state);
218 	unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
219 	unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
220 	unsigned int cpp = intel_fbc_cfb_cpp(plane_state);
221 
222 	return _intel_fbc_cfb_stride(display, cpp, width, stride);
223 }
224 
225 /*
226  * Maximum height the hardware will compress, on HSW+
227  * additional lines (up to the actual plane height) will
228  * remain uncompressed.
229  */
230 static unsigned int intel_fbc_max_cfb_height(struct intel_display *display)
231 {
232 	if (DISPLAY_VER(display) >= 8)
233 		return 2560;
234 	else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
235 		return 2048;
236 	else
237 		return 1536;
238 }
239 
/* CFB size in bytes; height beyond the hardware limit stays uncompressed */
static unsigned int _intel_fbc_cfb_size(struct intel_display *display,
					unsigned int height, unsigned int stride)
{
	unsigned int cfb_height = min(height, intel_fbc_max_cfb_height(display));

	return cfb_height * stride;
}
245 
246 static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
247 {
248 	struct intel_display *display = to_intel_display(plane_state);
249 	unsigned int height = drm_rect_height(&plane_state->uapi.src) >> 16;
250 
251 	return _intel_fbc_cfb_size(display, height, intel_fbc_cfb_stride(plane_state));
252 }
253 
254 static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
255 {
256 	struct intel_display *display = to_intel_display(plane_state);
257 	unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
258 	unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
259 	const struct drm_framebuffer *fb = plane_state->hw.fb;
260 
261 	/*
262 	 * Override stride in 64 byte units per 4 line segment.
263 	 *
264 	 * Gen9 hw miscalculates cfb stride for linear as
265 	 * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
266 	 * we always need to use the override there.
267 	 *
268 	 * wa_14022269668 For bmg, always program the FBC_STRIDE before fbc enable
269 	 */
270 	if (stride != stride_aligned ||
271 	    (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR) ||
272 	    display->platform.battlemage)
273 		return stride_aligned * 4 / 64;
274 
275 	return 0;
276 }
277 
/* Whether the parent device provides fenced regions usable for FBC */
static bool intel_fbc_has_fences(struct intel_display *display)
{
	return intel_parent_has_fenced_regions(display);
}
282 
/* Assemble the FBC_CONTROL value (without FBC_CTL_EN) from the current state */
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	const struct intel_fbc_state *fbc_state = &fbc->state;
	unsigned int cfb_stride;
	u32 fbc_ctl;

	/* scale the 1:1 stride down by the chosen compression limit */
	cfb_stride = fbc_state->cfb_stride / fbc->limit;

	/* FBC_CTL wants 32B or 64B units */
	if (DISPLAY_VER(display) == 2)
		cfb_stride = (cfb_stride / 32) - 1;
	else
		cfb_stride = (cfb_stride / 64) - 1;

	fbc_ctl = FBC_CTL_PERIODIC |
		FBC_CTL_INTERVAL(fbc_state->interval) |
		FBC_CTL_STRIDE(cfb_stride);

	if (display->platform.i945gm)
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */

	if (fbc_state->fence_id >= 0)
		fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);

	return fbc_ctl;
}
310 
/* Assemble the FBC_CONTROL2 value (i965 only) from the current state */
static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 fbc_ctl2;

	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
		FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);

	if (fbc_state->fence_id >= 0)
		fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;

	return fbc_ctl2;
}
324 
325 static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
326 {
327 	struct intel_display *display = fbc->display;
328 	u32 fbc_ctl;
329 
330 	/* Disable compression */
331 	fbc_ctl = intel_de_read(display, FBC_CONTROL);
332 	if ((fbc_ctl & FBC_CTL_EN) == 0)
333 		return;
334 
335 	fbc_ctl &= ~FBC_CTL_EN;
336 	intel_de_write(display, FBC_CONTROL, fbc_ctl);
337 
338 	/* Wait for compressing bit to clear */
339 	if (intel_de_wait_for_clear_ms(display, FBC_STATUS,
340 				       FBC_STAT_COMPRESSING, 10)) {
341 		drm_dbg_kms(display->drm, "FBC idle timed out\n");
342 		return;
343 	}
344 }
345 
/* Program and enable i8xx style FBC from fbc->state */
static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	const struct intel_fbc_state *fbc_state = &fbc->state;
	int i;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		intel_de_write(display, FBC_TAG(i), 0);

	/* i965 has extra fence/plane configuration in FBC_CONTROL2 */
	if (DISPLAY_VER(display) == 4) {
		intel_de_write(display, FBC_CONTROL2,
			       i965_fbc_ctl2(fbc));
		intel_de_write(display, FBC_FENCE_OFF,
			       fbc_state->fence_y_offset);
	}

	/* enable last, after everything else is programmed */
	intel_de_write(display, FBC_CONTROL,
		       FBC_CTL_EN | i8xx_fbc_ctl(fbc));
}
366 
367 static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
368 {
369 	return intel_de_read(fbc->display, FBC_CONTROL) & FBC_CTL_EN;
370 }
371 
372 static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
373 {
374 	return intel_de_read(fbc->display, FBC_STATUS) &
375 		(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
376 }
377 
/*
 * Force the hardware to re-examine the frontbuffer by rewriting the
 * plane address register with its current value.
 */
static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;

	intel_de_write_fw(display, DSPADDR(display, i9xx_plane),
			  intel_de_read_fw(display, DSPADDR(display, i9xx_plane)));
}
387 
/*
 * Program the CFB and line length buffer base addresses. These registers
 * take full (32 bit) addresses, hence the overflow checks against U32_MAX.
 */
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	drm_WARN_ON(display->drm,
		    range_end_overflows_t(u64, intel_parent_stolen_area_address(display),
					  intel_parent_stolen_node_offset(display, fbc->compressed_fb),
					  U32_MAX));
	drm_WARN_ON(display->drm,
		    range_end_overflows_t(u64, intel_parent_stolen_area_address(display),
					  intel_parent_stolen_node_offset(display, fbc->compressed_llb),
					  U32_MAX));
	intel_de_write(display, FBC_CFB_BASE,
		       intel_parent_stolen_node_address(display, fbc->compressed_fb));
	intel_de_write(display, FBC_LL_BASE,
		       intel_parent_stolen_node_address(display, fbc->compressed_llb));
}
405 
/* FBC vtable for the original i8xx style hardware */
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i8xx_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};
414 
/* Like i8xx_fbc_nuke(), but i965+ uses DSPSURF rather than DSPADDR */
static void i965_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;

	intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
			  intel_de_read_fw(display, DSPSURF(display, i9xx_plane)));
}
424 
/* FBC vtable for i965; i8xx style except for the nuke mechanism */
static const struct intel_fbc_funcs i965_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};
433 
/* Translate fbc->limit (1/2/4) into the DPFC_CTL limit field */
static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
{
	switch (fbc->limit) {
	default:
		/* unexpected limit values fall back to 1:1 */
		MISSING_CASE(fbc->limit);
		fallthrough;
	case 1:
		return DPFC_CTL_LIMIT_1X;
	case 2:
		return DPFC_CTL_LIMIT_2X;
	case 4:
		return DPFC_CTL_LIMIT_4X;
	}
}
448 
/* Assemble the DPFC_CONTROL value (without DPFC_CTL_EN) from the current state */
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
		DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);

	if (display->platform.g4x)
		dpfc_ctl |= DPFC_CTL_SR_EN;

	if (fbc_state->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;

		/* snb+ program the fence number elsewhere (SNB_DPFC_CTL_SA) */
		if (DISPLAY_VER(display) < 6)
			dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
	}

	return dpfc_ctl;
}
470 
/* Program and enable g4x style FBC from fbc->state */
static void g4x_fbc_activate(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	const struct intel_fbc_state *fbc_state = &fbc->state;

	intel_de_write(display, DPFC_FENCE_YOFF,
		       fbc_state->fence_y_offset);

	/* enable last, after the fence offset is programmed */
	intel_de_write(display, DPFC_CONTROL,
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
482 
/* Disable g4x style FBC; no-op if already disabled */
static void g4x_fbc_deactivate(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = intel_de_read(display, DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(display, DPFC_CONTROL, dpfc_ctl);
	}
}
495 
496 static bool g4x_fbc_is_active(struct intel_fbc *fbc)
497 {
498 	return intel_de_read(fbc->display, DPFC_CONTROL) & DPFC_CTL_EN;
499 }
500 
501 static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
502 {
503 	return intel_de_read(fbc->display, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
504 }
505 
/* Program the CFB base; g4x+ takes a stolen memory offset, not an address */
static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	intel_de_write(display, DPFC_CB_BASE,
		       intel_parent_stolen_node_offset(display, fbc->compressed_fb));
}
513 
/* FBC vtable for g4x style hardware (DPFC registers) */
static const struct intel_fbc_funcs g4x_fbc_funcs = {
	.activate = g4x_fbc_activate,
	.deactivate = g4x_fbc_deactivate,
	.is_active = g4x_fbc_is_active,
	.is_compressing = g4x_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = g4x_fbc_program_cfb,
};
522 
/* Program and enable ilk style FBC (per-instance ILK_DPFC_* registers) */
static void ilk_fbc_activate(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	struct intel_fbc_state *fbc_state = &fbc->state;

	intel_de_write(display, ILK_DPFC_FENCE_YOFF(fbc->id),
		       fbc_state->fence_y_offset);

	/* enable last, after the fence offset is programmed */
	intel_de_write(display, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
534 
/*
 * wa_18038517565: toggle DPFC clock gating around FBC enable/disable
 * on dg2 and display 14+.
 */
static void fbc_compressor_clkgate_disable_wa(struct intel_fbc *fbc,
					      bool disable)
{
	struct intel_display *display = fbc->display;

	if (display->platform.dg2)
		intel_de_rmw(display, GEN9_CLKGATE_DIS_4, DG2_DPFC_GATING_DIS,
			     disable ? DG2_DPFC_GATING_DIS : 0);
	else if (DISPLAY_VER(display) >= 14)
		intel_de_rmw(display, MTL_PIPE_CLKGATE_DIS2(fbc->id),
			     MTL_DPFC_GATING_DIS,
			     disable ? MTL_DPFC_GATING_DIS : 0);
}
548 
/* Disable ilk style FBC (and the dirty rect mechanism where present) */
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	u32 dpfc_ctl;

	/* dirty rect must be disabled before FBC itself */
	if (HAS_FBC_DIRTY_RECT(display))
		intel_de_write(display, XE3_FBC_DIRTY_CTL(fbc->id), 0);

	/* Disable compression */
	dpfc_ctl = intel_de_read(display, ILK_DPFC_CONTROL(fbc->id));
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
	}
}
564 
565 static bool ilk_fbc_is_active(struct intel_fbc *fbc)
566 {
567 	return intel_de_read(fbc->display, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
568 }
569 
570 static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
571 {
572 	return intel_de_read(fbc->display, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
573 }
574 
/* Program the CFB base as an offset into stolen memory */
static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id),
		       intel_parent_stolen_node_offset(display, fbc->compressed_fb));
}
582 
/* FBC vtable for ilk style hardware */
static const struct intel_fbc_funcs ilk_fbc_funcs = {
	.activate = ilk_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};
591 
/* Program (or clear) the CPU fence used by FBC on snb+ */
static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 ctl = 0;

	if (fbc_state->fence_id >= 0)
		ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);

	intel_de_write(display, SNB_DPFC_CTL_SA, ctl);
	intel_de_write(display, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
}
604 
/* snb activation: program the CPU fence, then do the ilk style enable */
static void snb_fbc_activate(struct intel_fbc *fbc)
{
	snb_fbc_program_fence(fbc);

	ilk_fbc_activate(fbc);
}
611 
/* Trigger re-compression via the dedicated render nuke register */
static void snb_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	intel_de_write(display, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
	/* posting read to make sure the write has reached the hardware */
	intel_de_posting_read(display, MSG_FBC_REND_STATE(fbc->id));
}
619 
/* FBC vtable for snb style hardware */
static const struct intel_fbc_funcs snb_fbc_funcs = {
	.activate = snb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};
628 
/* Program the CFB stride override (glk+ dedicated register) */
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 val = 0;

	/* override_cfb_stride == 0 means no override */
	if (fbc_state->override_cfb_stride)
		val |= FBC_STRIDE_OVERRIDE |
			FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	intel_de_write(display, GLK_FBC_STRIDE(fbc->id), val);
}
641 
/* Program the CFB stride override via the CHICKEN_MISC_4 register (gen9) */
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 val = 0;

	/* Display WA #0529: skl, kbl, bxt. */
	if (fbc_state->override_cfb_stride)
		val |= CHICKEN_FBC_STRIDE_OVERRIDE |
			CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	intel_de_rmw(display, CHICKEN_MISC_4,
		     CHICKEN_FBC_STRIDE_OVERRIDE |
		     CHICKEN_FBC_STRIDE_MASK, val);
}
657 
/* Assemble the DPFC_CONTROL value (without DPFC_CTL_EN) for ivb+ */
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc);

	if (display->platform.ivybridge)
		dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);

	/* xe2+ binds FBC to a specific plane */
	if (DISPLAY_VER(display) >= 20)
		dpfc_ctl |= DPFC_CTL_PLANE_BINDING(fbc_state->plane->id);

	if (fbc_state->fence_id >= 0)
		dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;

	/* debug aid: visualize compressed vs. uncompressed segments */
	if (fbc->false_color)
		dpfc_ctl |= DPFC_CTL_FALSE_COLOR;

	return dpfc_ctl;
}
680 
/*
 * Program and enable ivb+ style FBC. Note that the register write
 * order below is workaround-critical.
 */
static void ivb_fbc_activate(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	u32 dpfc_ctl;

	if (DISPLAY_VER(display) >= 10)
		glk_fbc_program_cfb_stride(fbc);
	else if (DISPLAY_VER(display) == 9)
		skl_fbc_program_cfb_stride(fbc);

	if (intel_fbc_has_fences(display))
		snb_fbc_program_fence(fbc);

	/* wa_14019417088 Alternative WA*/
	dpfc_ctl = ivb_dpfc_ctl(fbc);
	if (DISPLAY_VER(display) >= 20)
		intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);

	if (HAS_FBC_DIRTY_RECT(display))
		intel_de_write(display, XE3_FBC_DIRTY_CTL(fbc->id),
			       FBC_DIRTY_RECT_EN);

	/* enable last */
	intel_de_write(display, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | dpfc_ctl);
}
706 
707 static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
708 {
709 	return intel_de_read(fbc->display, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
710 }
711 
/* Toggle the false color debug feature while FBC is active */
static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
				    bool enable)
{
	intel_de_rmw(fbc->display, ILK_DPFC_CONTROL(fbc->id),
		     DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
}
718 
/* FBC vtable for ivb and all later platforms */
static const struct intel_fbc_funcs ivb_fbc_funcs = {
	.activate = ivb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ivb_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
	.set_false_color = ivb_fbc_set_false_color,
};
728 
/* Query the hardware enable state via the platform hook */
static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
{
	return fbc->funcs->is_active(fbc);
}
733 
/* Enable FBC in hardware and update the software bookkeeping */
static void intel_fbc_hw_activate(struct intel_fbc *fbc)
{
	trace_intel_fbc_activate(fbc->state.plane);

	fbc->active = true;
	fbc->activated = true;

	fbc->funcs->activate(fbc);
}
743 
/* Disable FBC in hardware and update the software bookkeeping */
static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
{
	trace_intel_fbc_deactivate(fbc->state.plane);

	fbc->active = false;

	fbc->funcs->deactivate(fbc);
}
752 
/* Query the hardware compression status via the platform hook */
static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
{
	return fbc->funcs->is_compressing(fbc);
}
757 
/* Force the hardware to recompress the frontbuffer; caller holds fbc->lock */
static void intel_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	lockdep_assert_held(&fbc->lock);
	/* nuking with a flip still pending would race with the flip */
	drm_WARN_ON(display->drm, fbc->flip_pending);

	trace_intel_fbc_nuke(fbc->state.plane);

	fbc->funcs->nuke(fbc);
}
769 
/*
 * (Re-)activate FBC and nuke the frontbuffer; caller holds fbc->lock.
 * May be called while already active to reprogram the fence.
 */
static void intel_fbc_activate(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	lockdep_assert_held(&fbc->lock);

	/* only the fence can change for a flip nuke */
	if (fbc->active && !intel_fbc_has_fences(display))
		return;
	/*
	 * In case of FBC dirt rect, any updates to the FBC registers will
	 * trigger the nuke.
	 */
	drm_WARN_ON(display->drm, fbc->active && HAS_FBC_DIRTY_RECT(display));

	intel_fbc_hw_activate(fbc);
	intel_fbc_nuke(fbc);

	fbc->no_fbc_reason = NULL;
}
790 
/*
 * Deactivate FBC, recording @reason for debugfs/debugging; caller holds
 * fbc->lock. Safe to call when already inactive.
 */
static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
{
	lockdep_assert_held(&fbc->lock);

	if (fbc->active)
		intel_fbc_hw_deactivate(fbc);

	fbc->no_fbc_reason = reason;
}
800 
801 static u64 intel_fbc_cfb_base_max(struct intel_display *display)
802 {
803 	if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
804 		return BIT_ULL(28);
805 	else
806 		return BIT_ULL(32);
807 }
808 
/* Upper bound (exclusive) for placing the CFB in stolen memory */
static u64 intel_fbc_stolen_end(struct intel_display *display)
{
	u64 end = intel_fbc_cfb_base_max(display);

	/*
	 * The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS.
	 */
	if (display->platform.broadwell ||
	    (DISPLAY_VER(display) == 9 && !display->platform.broxton)) {
		u64 stolen_area_size = intel_parent_stolen_area_size(display);

		/*
		 * If stolen_area_size is less than SZ_8M, use
		 * intel_fbc_cfb_base_max instead.  This should not happen,
		 * so warn if it does.
		 */
		if (drm_WARN_ON(display->drm,
				check_sub_overflow(stolen_area_size,
						   SZ_8M, &stolen_area_size)))
			return end;

		return min(end, stolen_area_size);
	}

	return end;
}
838 
839 static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
840 {
841 	return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
842 }
843 
844 static int intel_fbc_max_limit(struct intel_display *display)
845 {
846 	/* WaFbcOnly1to1Ratio:ctg */
847 	if (display->platform.g4x)
848 		return 1;
849 
850 	/*
851 	 * FBC2 can only do 1:1, 1:2, 1:4, we limit
852 	 * FBC1 to the same out of convenience.
853 	 */
854 	return 4;
855 }
856 
/*
 * Allocate the CFB from stolen memory, trading allocation size against the
 * compression limit. Returns the chosen limit (>= min_limit), or 0 if no
 * allocation succeeded. Note the size <<=/>>= side effects in the call
 * arguments below: the first attempt over-allocates (size * 2), then each
 * loop iteration halves the size while doubling the limit.
 */
static int find_compression_limit(struct intel_fbc *fbc,
				  unsigned int size, int min_limit)
{
	struct intel_display *display = fbc->display;
	u64 end = intel_fbc_stolen_end(display);
	int ret, limit = min_limit;

	size /= limit;

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = intel_parent_stolen_insert_node_in_range(display, fbc->compressed_fb,
						       size <<= 1, 4096, 0, end);
	if (ret == 0)
		return limit;

	for (; limit <= intel_fbc_max_limit(display); limit <<= 1) {
		ret = intel_parent_stolen_insert_node_in_range(display, fbc->compressed_fb,
							       size >>= 1, 4096, 0, end);
		if (ret == 0)
			return limit;
	}

	return 0;
}
881 
/*
 * Allocate the CFB (and the line length buffer on pre-g4x) from stolen
 * memory and record the chosen compression limit in fbc->limit.
 *
 * Returns 0 on success, -ENOSPC if stolen memory could not accommodate
 * the buffers.
 */
static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
			       unsigned int size, int min_limit)
{
	struct intel_display *display = fbc->display;
	int ret;

	drm_WARN_ON(display->drm,
		    intel_parent_stolen_node_allocated(display, fbc->compressed_fb));
	drm_WARN_ON(display->drm,
		    intel_parent_stolen_node_allocated(display, fbc->compressed_llb));

	/* pre-g4x hardware also needs a line length buffer */
	if (DISPLAY_VER(display) < 5 && !display->platform.g4x) {
		ret = intel_parent_stolen_insert_node(display, fbc->compressed_llb, 4096, 4096);
		if (ret)
			goto err;
	}

	ret = find_compression_limit(fbc, size, min_limit);
	if (!ret)
		goto err_llb;
	else if (ret > min_limit)
		drm_info_once(display->drm,
			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	fbc->limit = ret;

	drm_dbg_kms(display->drm,
		    "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
		    intel_parent_stolen_node_size(display, fbc->compressed_fb), fbc->limit);
	return 0;

err_llb:
	if (intel_parent_stolen_node_allocated(display, fbc->compressed_llb))
		intel_parent_stolen_remove_node(display, fbc->compressed_llb);
err:
	if (intel_parent_stolen_initialized(display))
		drm_info_once(display->drm,
			      "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
922 
/* Program the CFB base address/offset via the platform hook */
static void intel_fbc_program_cfb(struct intel_fbc *fbc)
{
	fbc->funcs->program_cfb(fbc);
}
927 
/* Apply the platform specific FBC workarounds before enabling FBC */
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	if (display->platform.skylake || display->platform.broxton) {
		/*
		 * WaFbcHighMemBwCorruptionAvoidance:skl,bxt
		 * Display WA #0883: skl,bxt
		 */
		intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
			     0, DPFC_DISABLE_DUMMY0);
	}

	if (display->platform.skylake || display->platform.kabylake ||
	    display->platform.coffeelake || display->platform.cometlake) {
		/*
		 * WaFbcNukeOnHostModify:skl,kbl,cfl
		 * Display WA #0873: skl,kbl,cfl
		 */
		intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
			     0, DPFC_NUKE_ON_ANY_MODIFICATION);
	}

	/* Wa_1409120013:icl,jsl,tgl,dg1 */
	if (intel_display_wa(display, INTEL_DISPLAY_WA_1409120013))
		intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
			     0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
	/*
	 * Wa_22014263786
	 * Fixes: Screen flicker with FBC and Package C state enabled
	 * Workaround: Forced SLB invalidation before start of new frame.
	 */
	if (intel_display_wa(display, INTEL_DISPLAY_WA_22014263786))
		intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
			     0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);

	/* wa_18038517565 Disable DPFC clock gating before FBC enable */
	if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
		fbc_compressor_clkgate_disable_wa(fbc, true);
}
968 
/*
 * Write the FBC system cache configuration register and record which
 * FBC instance (or FBC_SYS_CACHE_ID_NONE) now owns the cache. Caller
 * holds display->fbc.sys_cache.lock.
 */
static void fbc_sys_cache_update_config(struct intel_display *display, u32 reg,
					enum intel_fbc_id id)
{
	if (!HAS_FBC_SYS_CACHE(display))
		return;

	lockdep_assert_held(&display->fbc.sys_cache.lock);

	/*
	 * Wa_14025769978:
	 * Fixes: SoC hardware issue in read caching
	 * Workaround: disable cache read setting which is enabled by default.
	 */
	if (!intel_display_wa(display, INTEL_DISPLAY_WA_14025769978))
		/* Cache read enable is set by default */
		reg |= FBC_SYS_CACHE_READ_ENABLE;

	intel_de_write(display, XE3P_LPD_FBC_SYS_CACHE_USAGE_CFG, reg);

	display->fbc.sys_cache.id = id;
}
990 
/*
 * Drop this FBC instance's claim on the shared system cache; the config
 * is cleared only if @fbc is the current owner.
 */
static void fbc_sys_cache_disable(const struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	struct sys_cache_cfg *sys_cache = &display->fbc.sys_cache;

	mutex_lock(&sys_cache->lock);
	/* clear only if "fbc" reserved the cache */
	if (sys_cache->id == fbc->id)
		fbc_sys_cache_update_config(display, 0, FBC_SYS_CACHE_ID_NONE);
	mutex_unlock(&sys_cache->lock);
}
1002 
/*
 * Number of bytes of system cache that FBC may use for the compressed
 * buffer on this platform; 0 means the sys cache is not used.
 */
static int fbc_sys_cache_limit(struct intel_display *display)
{
	/* 2 MiB on display version 35, nothing elsewhere */
	return DISPLAY_VER(display) == 35 ? 2 * 1024 * 1024 : 0;
}
1010 
/*
 * Try to reserve the shared FBC system cache for this FBC instance.
 * Losing the race to another instance is fine; FBC then simply runs
 * without the sys cache.
 */
static void fbc_sys_cache_enable(const struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	struct sys_cache_cfg *sys_cache = &display->fbc.sys_cache;
	int range, offset;
	u32 cfg;

	if (!HAS_FBC_SYS_CACHE(display))
		return;

	/* cacheable range is programmed in 64k units */
	range = fbc_sys_cache_limit(display) / (64 * 1024);

	/* start base is the stolen node offset in 4k units */
	offset = intel_parent_stolen_node_offset(display, fbc->compressed_fb) / (4 * 1024);

	cfg = FBC_SYS_CACHE_TAG_USE_RES_SPACE | FBC_SYS_CACHEABLE_RANGE(range) |
	      FBC_SYS_CACHE_START_BASE(offset);

	mutex_lock(&sys_cache->lock);
	/* update sys cache config only if sys cache is unassigned */
	if (sys_cache->id == FBC_SYS_CACHE_ID_NONE)
		fbc_sys_cache_update_config(display, cfg, fbc->id);
	mutex_unlock(&sys_cache->lock);
}
1034 
/*
 * Remove the stolen memory nodes backing the compressed framebuffer and,
 * if allocated, the compressed line length buffer. FBC must already be
 * disabled in hardware.
 */
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	/* freeing the CFB while the hardware still uses it would be fatal */
	if (WARN_ON(intel_fbc_hw_is_active(fbc)))
		return;

	if (intel_parent_stolen_node_allocated(display, fbc->compressed_llb))
		intel_parent_stolen_remove_node(display, fbc->compressed_llb);
	if (intel_parent_stolen_node_allocated(display, fbc->compressed_fb))
		intel_parent_stolen_remove_node(display, fbc->compressed_fb);
}
1047 
/*
 * Driver teardown: release every FBC instance's CFB/stolen resources and
 * sanity check that no instance still holds the sys cache reservation.
 */
void intel_fbc_cleanup(struct intel_display *display)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(display, fbc, fbc_id) {
		mutex_lock(&fbc->lock);
		__intel_fbc_cleanup_cfb(fbc);
		mutex_unlock(&fbc->lock);

		intel_parent_stolen_node_free(display, fbc->compressed_fb);
		intel_parent_stolen_node_free(display, fbc->compressed_llb);

		kfree(fbc);
	}

	mutex_lock(&display->fbc.sys_cache.lock);
	drm_WARN_ON(display->drm,
		    display->fbc.sys_cache.id != FBC_SYS_CACHE_ID_NONE);
	mutex_unlock(&display->fbc.sys_cache.lock);
}
1069 
1070 static bool i8xx_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
1071 {
1072 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1073 	unsigned int stride = intel_fbc_plane_stride(plane_state) *
1074 		fb->format->cpp[0];
1075 
1076 	return stride == 4096 || stride == 8192;
1077 }
1078 
1079 static bool i965_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
1080 {
1081 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1082 	unsigned int stride = intel_fbc_plane_stride(plane_state) *
1083 		fb->format->cpp[0];
1084 
1085 	return stride >= 2048 && stride <= 16384;
1086 }
1087 
/* g4x+: no extra stride restrictions, any stride is accepted */
static bool g4x_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
{
	return true;
}
1092 
1093 static bool skl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
1094 {
1095 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1096 	unsigned int stride = intel_fbc_plane_stride(plane_state) *
1097 		fb->format->cpp[0];
1098 
1099 	/* Display WA #1105: skl,bxt,kbl,cfl,glk */
1100 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
1101 		return false;
1102 
1103 	return true;
1104 }
1105 
/* icl+: no stride restrictions, any stride is accepted */
static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
{
	return true;
}
1110 
1111 static bool stride_is_valid(const struct intel_plane_state *plane_state)
1112 {
1113 	struct intel_display *display = to_intel_display(plane_state);
1114 
1115 	if (DISPLAY_VER(display) >= 11)
1116 		return icl_fbc_stride_is_valid(plane_state);
1117 	else if (DISPLAY_VER(display) >= 9)
1118 		return skl_fbc_stride_is_valid(plane_state);
1119 	else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
1120 		return g4x_fbc_stride_is_valid(plane_state);
1121 	else if (DISPLAY_VER(display) == 4)
1122 		return i965_fbc_stride_is_valid(plane_state);
1123 	else
1124 		return i8xx_fbc_stride_is_valid(plane_state);
1125 }
1126 
1127 static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
1128 {
1129 	struct intel_display *display = to_intel_display(plane_state);
1130 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1131 
1132 	switch (fb->format->format) {
1133 	case DRM_FORMAT_XRGB8888:
1134 	case DRM_FORMAT_XBGR8888:
1135 		return true;
1136 	case DRM_FORMAT_XRGB1555:
1137 	case DRM_FORMAT_RGB565:
1138 		/* 16bpp not supported on gen2 */
1139 		if (DISPLAY_VER(display) == 2)
1140 			return false;
1141 		return true;
1142 	default:
1143 		return false;
1144 	}
1145 }
1146 
1147 static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
1148 {
1149 	struct intel_display *display = to_intel_display(plane_state);
1150 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1151 
1152 	switch (fb->format->format) {
1153 	case DRM_FORMAT_XRGB8888:
1154 	case DRM_FORMAT_XBGR8888:
1155 		return true;
1156 	case DRM_FORMAT_RGB565:
1157 		/* WaFbcOnly1to1Ratio:ctg */
1158 		if (display->platform.g4x)
1159 			return false;
1160 		return true;
1161 	default:
1162 		return false;
1163 	}
1164 }
1165 
1166 static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
1167 {
1168 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1169 
1170 	switch (fb->format->format) {
1171 	case DRM_FORMAT_XRGB8888:
1172 	case DRM_FORMAT_XBGR8888:
1173 	case DRM_FORMAT_ARGB8888:
1174 	case DRM_FORMAT_ABGR8888:
1175 	case DRM_FORMAT_RGB565:
1176 		return true;
1177 	default:
1178 		return false;
1179 	}
1180 }
1181 
1182 static bool
1183 xe3p_lpd_fbc_fp16_format_is_valid(const struct intel_plane_state *plane_state)
1184 {
1185 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1186 
1187 	switch (fb->format->format) {
1188 	case DRM_FORMAT_ARGB16161616F:
1189 	case DRM_FORMAT_ABGR16161616F:
1190 		return true;
1191 	default:
1192 		return false;
1193 	}
1194 }
1195 
1196 static bool xe3p_lpd_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
1197 {
1198 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1199 
1200 	if (lnl_fbc_pixel_format_is_valid(plane_state))
1201 		return true;
1202 
1203 	if (xe3p_lpd_fbc_fp16_format_is_valid(plane_state))
1204 		return true;
1205 
1206 	switch (fb->format->format) {
1207 	case DRM_FORMAT_XRGB16161616:
1208 	case DRM_FORMAT_XBGR16161616:
1209 	case DRM_FORMAT_ARGB16161616:
1210 	case DRM_FORMAT_ABGR16161616:
1211 		return true;
1212 	default:
1213 		return false;
1214 	}
1215 }
1216 
1217 bool intel_fbc_need_pixel_normalizer(const struct intel_plane_state *plane_state)
1218 {
1219 	struct intel_display *display = to_intel_display(plane_state);
1220 
1221 	if (HAS_PIXEL_NORMALIZER(display) &&
1222 	    xe3p_lpd_fbc_fp16_format_is_valid(plane_state))
1223 		return true;
1224 
1225 	return false;
1226 }
1227 
1228 static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
1229 {
1230 	struct intel_display *display = to_intel_display(plane_state);
1231 
1232 	if (DISPLAY_VER(display) >= 35)
1233 		return xe3p_lpd_fbc_pixel_format_is_valid(plane_state);
1234 	else if (DISPLAY_VER(display) >= 20)
1235 		return lnl_fbc_pixel_format_is_valid(plane_state);
1236 	else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
1237 		return g4x_fbc_pixel_format_is_valid(plane_state);
1238 	else
1239 		return i8xx_fbc_pixel_format_is_valid(plane_state);
1240 }
1241 
/* oldest platforms: only an unrotated plane is accepted */
static bool i8xx_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
{
	return plane_state->hw.rotation == DRM_MODE_ROTATE_0;
}
1246 
/* g4x+: any rotation is accepted */
static bool g4x_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
{
	return true;
}
1251 
1252 static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
1253 {
1254 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1255 	unsigned int rotation = plane_state->hw.rotation;
1256 
1257 	if (fb->format->format == DRM_FORMAT_RGB565 &&
1258 	    drm_rotation_90_or_270(rotation))
1259 		return false;
1260 
1261 	return true;
1262 }
1263 
1264 static bool rotation_is_valid(const struct intel_plane_state *plane_state)
1265 {
1266 	struct intel_display *display = to_intel_display(plane_state);
1267 
1268 	if (DISPLAY_VER(display) >= 9)
1269 		return skl_fbc_rotation_is_valid(plane_state);
1270 	else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
1271 		return g4x_fbc_rotation_is_valid(plane_state);
1272 	else
1273 		return i8xx_fbc_rotation_is_valid(plane_state);
1274 }
1275 
1276 static void intel_fbc_max_surface_size(struct intel_display *display,
1277 				       unsigned int *w, unsigned int *h)
1278 {
1279 	if (DISPLAY_VER(display) >= 11) {
1280 		*w = 8192;
1281 		*h = 4096;
1282 	} else if (DISPLAY_VER(display) >= 10) {
1283 		*w = 5120;
1284 		*h = 4096;
1285 	} else if (DISPLAY_VER(display) >= 7) {
1286 		*w = 4096;
1287 		*h = 4096;
1288 	} else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) {
1289 		*w = 4096;
1290 		*h = 2048;
1291 	} else {
1292 		*w = 2048;
1293 		*h = 1536;
1294 	}
1295 }
1296 
1297 /*
1298  * For some reason, the hardware tracking starts looking at whatever we
1299  * programmed as the display plane base address register. It does not look at
1300  * the X and Y offset registers. That's why we include the src x/y offsets
1301  * instead of just looking at the plane size.
1302  */
1303 static bool intel_fbc_surface_size_ok(const struct intel_plane_state *plane_state)
1304 {
1305 	struct intel_display *display = to_intel_display(plane_state);
1306 	unsigned int effective_w, effective_h, max_w, max_h;
1307 
1308 	intel_fbc_max_surface_size(display, &max_w, &max_h);
1309 
1310 	effective_w = plane_state->view.color_plane[0].x +
1311 		(drm_rect_width(&plane_state->uapi.src) >> 16);
1312 	effective_h = plane_state->view.color_plane[0].y +
1313 		(drm_rect_height(&plane_state->uapi.src) >> 16);
1314 
1315 	return effective_w <= max_w && effective_h <= max_h;
1316 }
1317 
1318 static void intel_fbc_max_plane_size(struct intel_display *display,
1319 				     unsigned int *w, unsigned int *h)
1320 {
1321 	if (DISPLAY_VER(display) >= 10) {
1322 		*w = 5120;
1323 		*h = 4096;
1324 	} else if (DISPLAY_VER(display) >= 8 || display->platform.haswell) {
1325 		*w = 4096;
1326 		*h = 4096;
1327 	} else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) {
1328 		*w = 4096;
1329 		*h = 2048;
1330 	} else {
1331 		*w = 2048;
1332 		*h = 1536;
1333 	}
1334 }
1335 
1336 static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
1337 {
1338 	struct intel_display *display = to_intel_display(plane_state);
1339 	unsigned int w, h, max_w, max_h;
1340 
1341 	intel_fbc_max_plane_size(display, &max_w, &max_h);
1342 
1343 	w = drm_rect_width(&plane_state->uapi.src) >> 16;
1344 	h = drm_rect_height(&plane_state->uapi.src) >> 16;
1345 
1346 	return w <= max_w && h <= max_h;
1347 }
1348 
1349 static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
1350 {
1351 	const struct drm_framebuffer *fb = plane_state->hw.fb;
1352 
1353 	return fb->modifier == I915_FORMAT_MOD_X_TILED;
1354 }
1355 
/* skl+: any tiling/modifier is accepted */
static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
	return true;
}
1360 
1361 static bool tiling_is_valid(const struct intel_plane_state *plane_state)
1362 {
1363 	struct intel_display *display = to_intel_display(plane_state);
1364 
1365 	if (DISPLAY_VER(display) >= 9)
1366 		return skl_fbc_tiling_valid(plane_state);
1367 	else
1368 		return i8xx_fbc_tiling_valid(plane_state);
1369 }
1370 
/*
 * Reset the software dirty rect tracking to an empty rect.
 * Caller must hold fbc->lock.
 */
static void
intel_fbc_invalidate_dirty_rect(struct intel_fbc *fbc)
{
	lockdep_assert_held(&fbc->lock);

	fbc->state.dirty_rect = DRM_RECT_INIT(0, 0, 0, 0);
}
1378 
/*
 * Program the dirty rect scanline range into the hardware. The end line
 * field is inclusive, hence the y2 - 1. @dsb may be NULL (see
 * intel_fbc_hw_intialize_dirty_rect()).
 */
static void
intel_fbc_program_dirty_rect(struct intel_dsb *dsb, struct intel_fbc *fbc,
			     const struct drm_rect *fbc_dirty_rect)
{
	struct intel_display *display = fbc->display;

	/* an empty rect (y2 == 0) must never reach the hardware */
	drm_WARN_ON(display->drm, fbc_dirty_rect->y2 == 0);

	intel_de_write_dsb(display, dsb, XE3_FBC_DIRTY_RECT(fbc->id),
			   FBC_DIRTY_RECT_START_LINE(fbc_dirty_rect->y1) |
			   FBC_DIRTY_RECT_END_LINE(fbc_dirty_rect->y2 - 1));
}
1391 
/*
 * Push the currently tracked dirty rect to the hardware, unless it is
 * empty. Caller must hold fbc->lock.
 */
static void
intel_fbc_dirty_rect_update(struct intel_dsb *dsb, struct intel_fbc *fbc)
{
	const struct drm_rect *fbc_dirty_rect = &fbc->state.dirty_rect;

	lockdep_assert_held(&fbc->lock);

	/* nothing to program when the tracked rect is empty */
	if (!drm_rect_visible(fbc_dirty_rect))
		return;

	intel_fbc_program_dirty_rect(dsb, fbc, fbc_dirty_rect);
}
1404 
/*
 * Plane update (noarm) hook: write the dirty rect for @plane if it
 * currently owns its FBC instance. No-op on platforms without dirty
 * rect support.
 */
void
intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb,
				  struct intel_plane *plane)
{
	struct intel_display *display = to_intel_display(plane);
	struct intel_fbc *fbc = plane->fbc;

	if (!HAS_FBC_DIRTY_RECT(display))
		return;

	mutex_lock(&fbc->lock);

	if (fbc->state.plane == plane)
		intel_fbc_dirty_rect_update(dsb, fbc);

	mutex_unlock(&fbc->lock);
}
1422 
/*
 * Seed the dirty rect registers with the whole plane area so the
 * hardware always holds valid coordinates before the first real update.
 * NOTE(review): function name carries a typo ("intialize"); renaming
 * would touch callers outside this hunk.
 */
static void
intel_fbc_hw_intialize_dirty_rect(struct intel_fbc *fbc,
				  const struct intel_plane_state *plane_state)
{
	struct drm_rect src;

	/*
	 * Initializing the FBC HW with the whole plane area as the dirty rect.
	 * This is to ensure that we have valid coords be written to the
	 * HW as dirty rect.
	 */
	drm_rect_fp_to_int(&src, &plane_state->uapi.src);

	intel_fbc_program_dirty_rect(NULL, fbc, &src);
}
1438 
/*
 * Latch the parameters of the plane that now owns this FBC instance
 * into fbc->state (CFB stride/size, fence, compression interval).
 */
static void intel_fbc_update_state(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = plane->fbc;
	struct intel_fbc_state *fbc_state = &fbc->state;

	/* the plane must be FBC eligible and not owned by another plane */
	WARN_ON(plane_state->no_fbc_reason);
	WARN_ON(fbc_state->plane && fbc_state->plane != plane);

	fbc_state->plane = plane;

	/* FBC1 compression interval: arbitrary choice of 1 second */
	fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);

	fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);

	drm_WARN_ON(display->drm, plane_state->flags & PLANE_HAS_FENCE &&
		    !intel_fbc_has_fences(display));

	/* fence_id == -1 means "no fence" */
	if (plane_state->flags & PLANE_HAS_FENCE)
		fbc_state->fence_id = intel_parent_vma_fence_id(display, plane_state->ggtt_vma);
	else
		fbc_state->fence_id = -1;

	fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
	fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
	fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
}
1473 
/* Pre-skl requires a fenced surface for FBC; skl+ needs no fence. */
static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
	struct intel_display *display = to_intel_display(plane_state);

	/*
	 * The use of a CPU fence is one of two ways to detect writes by the
	 * CPU to the scanout and trigger updates to the FBC.
	 *
	 * The other method is by software tracking (see
	 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
	 * the current compressed buffer and recompress it.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 */
	return DISPLAY_VER(display) >= 9 ||
		(plane_state->flags & PLANE_HAS_FENCE &&
		 intel_parent_vma_fence_id(display, plane_state->ggtt_vma) != -1);
}
1494 
1495 static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
1496 {
1497 	struct intel_display *display = to_intel_display(plane_state);
1498 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1499 	struct intel_fbc *fbc = plane->fbc;
1500 
1501 	return intel_fbc_min_limit(plane_state) <= fbc->limit &&
1502 		intel_fbc_cfb_size(plane_state) <= fbc->limit *
1503 			intel_parent_stolen_node_size(display, fbc->compressed_fb);
1504 }
1505 
1506 static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
1507 {
1508 	return !plane_state->no_fbc_reason &&
1509 		intel_fbc_is_fence_ok(plane_state) &&
1510 		intel_fbc_is_cfb_ok(plane_state);
1511 }
1512 
/*
 * Compute the software dirty rect for the upcoming flip; invalidate the
 * tracking instead when a modeset or FBC reconfiguration is pending.
 * Caller must hold fbc->lock.
 */
static void
__intel_fbc_prepare_dirty_rect(const struct intel_plane_state *plane_state,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct intel_fbc *fbc = plane->fbc;
	struct drm_rect *fbc_dirty_rect = &fbc->state.dirty_rect;
	int width = drm_rect_width(&plane_state->uapi.src) >> 16;
	const struct drm_rect *damage = &plane_state->damage;
	int y_offset = plane_state->view.color_plane[0].y;

	lockdep_assert_held(&fbc->lock);

	if (intel_crtc_needs_modeset(crtc_state) ||
	    !intel_fbc_is_ok(plane_state)) {
		intel_fbc_invalidate_dirty_rect(fbc);
		return;
	}

	if (drm_rect_visible(damage))
		*fbc_dirty_rect = *damage;
	else
		/* dirty rect must cover at least one line */
		*fbc_dirty_rect = DRM_RECT_INIT(0, y_offset, width, 1);
}
1538 
/*
 * Pre-commit hook: refresh the software dirty rect tracking for every
 * FBC-owning plane on @crtc. No-op without dirty rect hardware.
 */
void
intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	if (!HAS_FBC_DIRTY_RECT(display))
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		/* only planes on this crtc with an FBC instance */
		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (fbc->state.plane == plane)
			__intel_fbc_prepare_dirty_rect(plane_state,
						       crtc_state);

		mutex_unlock(&fbc->lock);
	}
}
1568 
1569 static int _intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state)
1570 {
1571 	struct intel_display *display = to_intel_display(crtc_state);
1572 
1573 	/* WaFbcExceedCdClockThreshold:hsw,bdw */
1574 	if (display->platform.haswell || display->platform.broadwell)
1575 		return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
1576 
1577 	/* no FBC specific limits to worry about */
1578 	return 0;
1579 }
1580 
/*
 * Check every condition FBC places on a plane/crtc configuration. On
 * return, plane_state->no_fbc_reason is NULL when FBC is possible,
 * otherwise it points to a human readable reason string (the first
 * failing check wins). Always returns 0.
 */
static int intel_fbc_check_plane(struct intel_atomic_state *state,
				 struct intel_plane *plane)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
	const struct intel_crtc_state *crtc_state;
	struct intel_fbc *fbc = plane->fbc;

	/* plane is not FBC capable */
	if (!fbc)
		return 0;

	if (!intel_parent_stolen_initialized(display)) {
		plane_state->no_fbc_reason = "stolen memory not initialised";
		return 0;
	}

	if (intel_parent_vgpu_active(display)) {
		plane_state->no_fbc_reason = "VGPU active";
		return 0;
	}

	if (!display->params.enable_fbc) {
		plane_state->no_fbc_reason = "disabled per module param or by default";
		return 0;
	}

	if (!plane_state->uapi.visible) {
		plane_state->no_fbc_reason = "plane not visible";
		return 0;
	}

	if (intel_display_wa(display, INTEL_DISPLAY_WA_16023588340)) {
		plane_state->no_fbc_reason = "Wa_16023588340";
		return 0;
	}

	/*
	 * Wa_15018326506:
	 * Fixes: Underrun during media decode
	 * Workaround: Do not enable FBC
	 */
	if (intel_display_wa(display, INTEL_DISPLAY_WA_15018326506)) {
		plane_state->no_fbc_reason = "Wa_15018326506";
		return 0;
	}

	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
	if (intel_display_vtd_active(display) &&
	    (display->platform.skylake || display->platform.broxton)) {
		plane_state->no_fbc_reason = "VT-d enabled";
		return 0;
	}

	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		plane_state->no_fbc_reason = "interlaced mode not supported";
		return 0;
	}

	if (crtc_state->double_wide) {
		plane_state->no_fbc_reason = "double wide pipe not supported";
		return 0;
	}

	/*
	 * Display 12+ is not supporting FBC with PSR2.
	 * Recommendation is to keep this combination disabled
	 * Bspec: 50422 HSD: 14010260002
	 *
	 * TODO: Implement a logic to select between PSR2 selective fetch and
	 * FBC based on Bspec: 68881 in xe2lpd onwards.
	 *
	 * As we still see some strange underruns in those platforms while
	 * disabling PSR2, keep FBC disabled in case of selective update is on
	 * until the selection logic is implemented.
	 */
	if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update) {
		plane_state->no_fbc_reason = "Selective update enabled";
		return 0;
	}

	/* Wa_14016291713 */
	if ((IS_DISPLAY_VER(display, 12, 13) ||
	     IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0)) &&
	    crtc_state->has_psr && !crtc_state->has_panel_replay) {
		plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
		return 0;
	}

	if (!pixel_format_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "pixel format not supported";
		return 0;
	}

	if (!tiling_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "tiling not supported";
		return 0;
	}

	if (!rotation_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "rotation not supported";
		return 0;
	}

	if (!stride_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "stride not supported";
		return 0;
	}

	if (DISPLAY_VER(display) < 20 &&
	    plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    fb->format->has_alpha) {
		plane_state->no_fbc_reason = "per-pixel alpha not supported";
		return 0;
	}

	if (!intel_fbc_plane_size_valid(plane_state)) {
		plane_state->no_fbc_reason = "plane size too big";
		return 0;
	}

	if (!intel_fbc_surface_size_ok(plane_state)) {
		plane_state->no_fbc_reason = "surface size too big";
		return 0;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (IS_DISPLAY_VER(display, 9, 12) &&
	    plane_state->view.color_plane[0].y & 3) {
		plane_state->no_fbc_reason = "plane start Y offset misaligned";
		return 0;
	}

	/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
	if (IS_DISPLAY_VER(display, 9, 12) &&
	    (plane_state->view.color_plane[0].y +
	     (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
		plane_state->no_fbc_reason = "plane end Y offset misaligned";
		return 0;
	}

	if (_intel_fbc_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq) {
		plane_state->no_fbc_reason = "pixel rate too high";
		return 0;
	}

	/* all checks passed, FBC is possible */
	plane_state->no_fbc_reason = NULL;

	return 0;
}
1739 
1740 int intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state)
1741 {
1742 	struct intel_display *display = to_intel_display(crtc_state);
1743 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1744 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1745 	int min_cdclk;
1746 
1747 	if (!plane->fbc)
1748 		return 0;
1749 
1750 	min_cdclk = _intel_fbc_min_cdclk(crtc_state);
1751 
1752 	/*
1753 	 * Do not ask for more than the max CDCLK frequency,
1754 	 * if that is not enough FBC will simply not be used.
1755 	 */
1756 	if (min_cdclk > display->cdclk.max_cdclk_freq)
1757 		return 0;
1758 
1759 	return min_cdclk;
1760 }
1761 
/*
 * Determine whether a plane update can keep FBC enabled and just nuke
 * (recompress) the CFB instead of a full deactivate/reactivate cycle:
 * everything affecting the CFB layout must remain unchanged.
 */
static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    struct intel_plane *plane)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	const struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
	const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;

	if (intel_crtc_needs_modeset(new_crtc_state))
		return false;

	/* FBC must be usable both before and after the flip */
	if (!intel_fbc_is_ok(old_plane_state) ||
	    !intel_fbc_is_ok(new_plane_state))
		return false;

	if (old_fb->format->format != new_fb->format->format)
		return false;

	if (old_fb->modifier != new_fb->modifier)
		return false;

	if (intel_fbc_plane_stride(old_plane_state) !=
	    intel_fbc_plane_stride(new_plane_state))
		return false;

	if (intel_fbc_cfb_stride(old_plane_state) !=
	    intel_fbc_cfb_stride(new_plane_state))
		return false;

	if (intel_fbc_cfb_size(old_plane_state) !=
	    intel_fbc_cfb_size(new_plane_state))
		return false;

	if (intel_fbc_override_cfb_stride(old_plane_state) !=
	    intel_fbc_override_cfb_stride(new_plane_state))
		return false;

	return true;
}
1806 
/*
 * Deactivate FBC ahead of a plane update that a simple nuke cannot
 * handle. Returns true when the caller must wait an extra vblank before
 * touching the plane registers (Display WA #1198). Caller must hold
 * fbc->lock.
 */
static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_fbc *fbc = plane->fbc;
	bool need_vblank_wait = false;

	lockdep_assert_held(&fbc->lock);

	fbc->flip_pending = true;

	/* no deactivation needed if a nuke suffices for this update */
	if (intel_fbc_can_flip_nuke(state, crtc, plane))
		return need_vblank_wait;

	intel_fbc_deactivate(fbc, "update pending");

	/*
	 * Display WA #1198: glk+
	 * Need an extra vblank wait between FBC disable and most plane
	 * updates. Bspec says this is only needed for plane disable, but
	 * that is not true. Touching most plane registers will cause the
	 * corruption to appear. Also SKL/derivatives do not seem to be
	 * affected.
	 *
	 * TODO: could optimize this a bit by sampling the frame
	 * counter when we disable FBC (if it was already done earlier)
	 * and skipping the extra vblank wait before the plane update
	 * if at least one frame has already passed.
	 */
	if (fbc->activated && DISPLAY_VER(display) >= 10)
		need_vblank_wait = true;
	fbc->activated = false;

	return need_vblank_wait;
}
1843 
/*
 * Pre-plane-update hook: run __intel_fbc_pre_update() for every plane
 * on @crtc that owns its FBC instance. Returns true if an extra vblank
 * wait is required before the plane update.
 */
bool intel_fbc_pre_update(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_plane_state __maybe_unused *plane_state;
	bool need_vblank_wait = false;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (fbc->state.plane == plane)
			need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);

		mutex_unlock(&fbc->lock);
	}

	return need_vblank_wait;
}
1868 
/*
 * Tear down the FBC state of the currently owning plane: dirty rect
 * tracking, CFB stolen memory, sys cache claim and the clock gating
 * workaround. FBC must already be deactivated; caller must hold
 * fbc->lock.
 */
static void __intel_fbc_disable(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;
	struct intel_plane *plane = fbc->state.plane;

	lockdep_assert_held(&fbc->lock);
	drm_WARN_ON(display->drm, fbc->active);

	drm_dbg_kms(display->drm, "Disabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);

	intel_fbc_invalidate_dirty_rect(fbc);

	__intel_fbc_cleanup_cfb(fbc);

	fbc_sys_cache_disable(fbc);

	/* wa_18038517565 Enable DPFC clock gating after FBC disable */
	if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
		fbc_compressor_clkgate_disable_wa(fbc, false);

	fbc->state.plane = NULL;
	fbc->flip_pending = false;
	fbc->busy_bits = 0;
}
1894 
/*
 * Complete a plane update: clear the pending flip/busy tracking and
 * (re)activate FBC. Caller must hold fbc->lock.
 */
static void __intel_fbc_post_update(struct intel_fbc *fbc)
{
	lockdep_assert_held(&fbc->lock);

	fbc->flip_pending = false;
	fbc->busy_bits = 0;

	intel_fbc_activate(fbc);
}
1904 
/*
 * Post-plane-update hook: run __intel_fbc_post_update() for every
 * plane on @crtc that owns its FBC instance.
 */
void intel_fbc_post_update(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (fbc->state.plane == plane)
			__intel_fbc_post_update(fbc);

		mutex_unlock(&fbc->lock);
	}
}
1926 
1927 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
1928 {
1929 	if (fbc->state.plane)
1930 		return fbc->state.plane->frontbuffer_bit;
1931 	else
1932 		return 0;
1933 }
1934 
/*
 * Frontbuffer invalidate: a CPU write to the frontbuffer is about to
 * happen, so deactivate FBC until the matching flush. ORIGIN_FLIP and
 * ORIGIN_CURSOR_UPDATE are deliberately ignored here.
 */
static void __intel_fbc_invalidate(struct intel_fbc *fbc,
				   unsigned int frontbuffer_bits,
				   enum fb_op_origin origin)
{
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
		return;

	mutex_lock(&fbc->lock);

	/* only react to writes hitting the plane that owns this FBC instance */
	frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
	if (!frontbuffer_bits)
		goto out;

	fbc->busy_bits |= frontbuffer_bits;
	intel_fbc_deactivate(fbc, "frontbuffer write");

out:
	mutex_unlock(&fbc->lock);
}
1954 
1955 void intel_fbc_invalidate(struct intel_display *display,
1956 			  unsigned int frontbuffer_bits,
1957 			  enum fb_op_origin origin)
1958 {
1959 	struct intel_fbc *fbc;
1960 	enum intel_fbc_id fbc_id;
1961 
1962 	for_each_intel_fbc(display, fbc, fbc_id)
1963 		__intel_fbc_invalidate(fbc, frontbuffer_bits, origin);
1964 
1965 }
1966 
/*
 * Handle a frontbuffer flush for one FBC instance: clear the relevant
 * busy bits and, once nothing is busy or flip-pending anymore, nuke
 * (recompress) or reactivate FBC.
 */
static void __intel_fbc_flush(struct intel_fbc *fbc,
			      unsigned int frontbuffer_bits,
			      enum fb_op_origin origin)
{
	mutex_lock(&fbc->lock);

	/* Only bits belonging to the plane currently using FBC matter. */
	frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
	if (!frontbuffer_bits)
		goto out;

	/*
	 * Note: busy bits are cleared even for flip/cursor origins;
	 * only the nuke/activate below is skipped for those.
	 */
	fbc->busy_bits &= ~frontbuffer_bits;

	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
		goto out;

	if (fbc->busy_bits || fbc->flip_pending)
		goto out;

	if (fbc->active)
		intel_fbc_nuke(fbc);
	else
		intel_fbc_activate(fbc);

out:
	mutex_unlock(&fbc->lock);
}
1993 
1994 void intel_fbc_flush(struct intel_display *display,
1995 		     unsigned int frontbuffer_bits,
1996 		     enum fb_op_origin origin)
1997 {
1998 	struct intel_fbc *fbc;
1999 	enum intel_fbc_id fbc_id;
2000 
2001 	for_each_intel_fbc(display, fbc, fbc_id)
2002 		__intel_fbc_flush(fbc, frontbuffer_bits, origin);
2003 }
2004 
2005 int intel_fbc_atomic_check(struct intel_atomic_state *state)
2006 {
2007 	struct intel_plane_state __maybe_unused *plane_state;
2008 	struct intel_plane *plane;
2009 	int i;
2010 
2011 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2012 		int ret;
2013 
2014 		ret = intel_fbc_check_plane(state, plane);
2015 		if (ret)
2016 			return ret;
2017 	}
2018 
2019 	return 0;
2020 }
2021 
2022 static void __intel_fbc_enable(struct intel_atomic_state *state,
2023 			       struct intel_crtc *crtc,
2024 			       struct intel_plane *plane)
2025 {
2026 	struct intel_display *display = to_intel_display(state);
2027 	const struct intel_plane_state *plane_state =
2028 		intel_atomic_get_new_plane_state(state, plane);
2029 	struct intel_fbc *fbc = plane->fbc;
2030 
2031 	lockdep_assert_held(&fbc->lock);
2032 
2033 	if (fbc->state.plane) {
2034 		if (fbc->state.plane != plane)
2035 			return;
2036 
2037 		if (intel_fbc_is_ok(plane_state)) {
2038 			intel_fbc_update_state(state, crtc, plane);
2039 			return;
2040 		}
2041 
2042 		__intel_fbc_disable(fbc);
2043 	}
2044 
2045 	drm_WARN_ON(display->drm, fbc->active);
2046 
2047 	fbc->no_fbc_reason = plane_state->no_fbc_reason;
2048 	if (fbc->no_fbc_reason)
2049 		return;
2050 
2051 	if (!intel_fbc_is_fence_ok(plane_state)) {
2052 		fbc->no_fbc_reason = "framebuffer not fenced";
2053 		return;
2054 	}
2055 
2056 	if (fbc->underrun_detected) {
2057 		fbc->no_fbc_reason = "FIFO underrun";
2058 		return;
2059 	}
2060 
2061 	if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
2062 				intel_fbc_min_limit(plane_state))) {
2063 		fbc->no_fbc_reason = "not enough stolen memory";
2064 		return;
2065 	}
2066 
2067 	drm_dbg_kms(display->drm, "Enabling FBC on [PLANE:%d:%s]\n",
2068 		    plane->base.base.id, plane->base.name);
2069 	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
2070 
2071 	intel_fbc_update_state(state, crtc, plane);
2072 
2073 	if (HAS_FBC_DIRTY_RECT(display))
2074 		intel_fbc_hw_intialize_dirty_rect(fbc, plane_state);
2075 
2076 	intel_fbc_program_workarounds(fbc);
2077 	intel_fbc_program_cfb(fbc);
2078 
2079 	fbc_sys_cache_enable(fbc);
2080 }
2081 
2082 /**
2083  * intel_fbc_disable - disable FBC if it's associated with crtc
2084  * @crtc: the CRTC
2085  *
2086  * This function disables FBC if it's associated with the provided CRTC.
2087  */
2088 void intel_fbc_disable(struct intel_crtc *crtc)
2089 {
2090 	struct intel_display *display = to_intel_display(crtc);
2091 	struct intel_plane *plane;
2092 
2093 	for_each_intel_plane(display->drm, plane) {
2094 		struct intel_fbc *fbc = plane->fbc;
2095 
2096 		if (!fbc || plane->pipe != crtc->pipe)
2097 			continue;
2098 
2099 		mutex_lock(&fbc->lock);
2100 		if (fbc->state.plane == plane)
2101 			__intel_fbc_disable(fbc);
2102 		mutex_unlock(&fbc->lock);
2103 	}
2104 }
2105 
/*
 * Called during an atomic commit: for each FBC-capable plane on this
 * crtc, either tear FBC down (fastset where the plane can no longer
 * be compressed) or attempt a (re-)enable/update.
 */
void intel_fbc_update(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		/*
		 * On a fastset where FBC became impossible only disable
		 * if this plane currently owns the FBC instance.
		 */
		if (intel_crtc_needs_fastset(crtc_state) &&
		    plane_state->no_fbc_reason) {
			if (fbc->state.plane == plane)
				__intel_fbc_disable(fbc);
		} else {
			__intel_fbc_enable(state, crtc, plane);
		}

		mutex_unlock(&fbc->lock);
	}
}
2134 
/*
 * Deferred FIFO underrun handling: mark the underrun as detected and
 * permanently disable this FBC instance (until the status is reset).
 */
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
	struct intel_display *display = fbc->display;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->state.plane)
		goto out;

	drm_dbg_kms(display->drm, "Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(fbc, "FIFO underrun");
	/* Wait one vblank unless a flip is already pending anyway. */
	if (!fbc->flip_pending)
		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, fbc->state.plane->pipe));
	__intel_fbc_disable(fbc);
out:
	mutex_unlock(&fbc->lock);
}
2156 
/* Clear the underrun state so FBC may be enabled again on this instance. */
static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
{
	struct intel_display *display = fbc->display;

	/* Make sure a pending underrun work can't race with us. */
	cancel_work_sync(&fbc->underrun_work);

	mutex_lock(&fbc->lock);

	if (fbc->underrun_detected) {
		drm_dbg_kms(display->drm,
			    "Re-allowing FBC after fifo underrun\n");
		fbc->no_fbc_reason = "FIFO underrun cleared";
	}

	fbc->underrun_detected = false;
	mutex_unlock(&fbc->lock);
}
2174 
2175 /*
2176  * intel_fbc_reset_underrun - reset FBC fifo underrun status.
2177  * @display: display
2178  *
2179  * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
2180  * want to re-enable FBC after an underrun to increase test coverage.
2181  */
2182 void intel_fbc_reset_underrun(struct intel_display *display)
2183 {
2184 	struct intel_fbc *fbc;
2185 	enum intel_fbc_id fbc_id;
2186 
2187 	for_each_intel_fbc(display, fbc, fbc_id)
2188 		__intel_fbc_reset_underrun(fbc);
2189 }
2190 
2191 static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
2192 {
2193 	struct intel_display *display = fbc->display;
2194 
2195 	/*
2196 	 * There's no guarantee that underrun_detected won't be set to true
2197 	 * right after this check and before the work is scheduled, but that's
2198 	 * not a problem since we'll check it again under the work function
2199 	 * while FBC is locked. This check here is just to prevent us from
2200 	 * unnecessarily scheduling the work, and it relies on the fact that we
2201 	 * never switch underrun_detect back to false after it's true.
2202 	 */
2203 	if (READ_ONCE(fbc->underrun_detected))
2204 		return;
2205 
2206 	queue_work(display->wq.unordered, &fbc->underrun_work);
2207 }
2208 
2209 /**
2210  * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
2211  * @display: display
2212  *
2213  * Without FBC, most underruns are harmless and don't really cause too many
2214  * problems, except for an annoying message on dmesg. With FBC, underruns can
2215  * become black screens or even worse, especially when paired with bad
2216  * watermarks. So in order for us to be on the safe side, completely disable FBC
2217  * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
2218  * already suggests that watermarks may be bad, so try to be as safe as
2219  * possible.
2220  *
2221  * This function is called from the IRQ handler.
2222  */
2223 void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display)
2224 {
2225 	struct intel_fbc *fbc;
2226 	enum intel_fbc_id fbc_id;
2227 
2228 	for_each_intel_fbc(display, fbc, fbc_id)
2229 		__intel_fbc_handle_fifo_underrun_irq(fbc);
2230 }
2231 
2232 /**
2233  * intel_fbc_read_underrun_dbg_info - Read and log FBC-related FIFO underrun debug info
2234  * @display: display device instance
2235  * @pipe: the pipe possibly containing the FBC
2236  * @log: log the info?
2237  *
2238  * If @pipe does not contain an FBC instance, this function bails early.
2239  * Otherwise, FBC-related FIFO underrun is read and cleared, and then, if @log
2240  * is true, printed with error level.
2241  */
2242 void intel_fbc_read_underrun_dbg_info(struct intel_display *display,
2243 				      enum pipe pipe, bool log)
2244 {
2245 	struct intel_fbc *fbc = intel_fbc_for_pipe(display, pipe);
2246 	u32 val;
2247 
2248 	if (!fbc)
2249 		return;
2250 
2251 	val = intel_de_read(display, FBC_DEBUG_STATUS(fbc->id));
2252 	if (!(val & FBC_UNDERRUN_DECMPR))
2253 		return;
2254 
2255 	intel_de_write(display, FBC_DEBUG_STATUS(fbc->id), FBC_UNDERRUN_DECMPR);
2256 
2257 	if (log)
2258 		drm_err(display->drm,
2259 			"Pipe %c FIFO underrun info: FBC decompressing\n",
2260 			pipe_name(pipe));
2261 }
2262 
2263 /*
2264  * The DDX driver changes its behavior depending on the value it reads from
2265  * i915.enable_fbc, so sanitize it by translating the default value into either
2266  * 0 or 1 in order to allow it to know what's going on.
2267  *
2268  * Notice that this is done at driver initialization and we still allow user
2269  * space to change the value during runtime without sanitizing it again. IGT
2270  * relies on being able to change i915.enable_fbc at runtime.
2271  */
2272 static int intel_sanitize_fbc_option(struct intel_display *display)
2273 {
2274 	if (display->params.enable_fbc >= 0)
2275 		return !!display->params.enable_fbc;
2276 
2277 	if (!HAS_FBC(display))
2278 		return 0;
2279 
2280 	if (display->platform.broadwell || DISPLAY_VER(display) >= 9)
2281 		return 1;
2282 
2283 	return 0;
2284 }
2285 
/* Associate @plane with the FBC instance @fbc. */
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
{
	plane->fbc = fbc;
}
2290 
/*
 * Allocate and initialize one struct intel_fbc for @fbc_id, including its
 * stolen-memory node handles and the per-generation vfunc table.
 * Returns NULL on allocation failure.
 */
static struct intel_fbc *intel_fbc_create(struct intel_display *display,
					  enum intel_fbc_id fbc_id)
{
	struct intel_fbc *fbc;

	fbc = kzalloc_obj(*fbc);
	if (!fbc)
		return NULL;

	fbc->compressed_fb = intel_parent_stolen_node_alloc(display);
	if (!fbc->compressed_fb)
		goto err;
	fbc->compressed_llb = intel_parent_stolen_node_alloc(display);
	if (!fbc->compressed_llb)
		goto err;

	fbc->id = fbc_id;
	fbc->display = display;
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);

	/* Pick the hw frontend matching the display generation/platform. */
	if (DISPLAY_VER(display) >= 7)
		fbc->funcs = &ivb_fbc_funcs;
	else if (DISPLAY_VER(display) == 6)
		fbc->funcs = &snb_fbc_funcs;
	else if (DISPLAY_VER(display) == 5)
		fbc->funcs = &ilk_fbc_funcs;
	else if (display->platform.g4x)
		fbc->funcs = &g4x_fbc_funcs;
	else if (DISPLAY_VER(display) == 4)
		fbc->funcs = &i965_fbc_funcs;
	else
		fbc->funcs = &i8xx_fbc_funcs;

	return fbc;

err:
	/*
	 * NOTE(review): compressed_llb may still be NULL on the first goto;
	 * presumably intel_parent_stolen_node_free() tolerates NULL — confirm.
	 */
	intel_parent_stolen_node_free(display, fbc->compressed_llb);
	intel_parent_stolen_node_free(display, fbc->compressed_fb);
	kfree(fbc);

	return NULL;
}
2334 
2335 /**
2336  * intel_fbc_init - Initialize FBC
2337  * @display: display
2338  *
2339  * This function might be called during PM init process.
2340  */
2341 void intel_fbc_init(struct intel_display *display)
2342 {
2343 	enum intel_fbc_id fbc_id;
2344 
2345 	display->params.enable_fbc = intel_sanitize_fbc_option(display);
2346 	drm_dbg_kms(display->drm, "Sanitized enable_fbc value: %d\n",
2347 		    display->params.enable_fbc);
2348 
2349 	for_each_fbc_id(display, fbc_id)
2350 		display->fbc.instances[fbc_id] = intel_fbc_create(display, fbc_id);
2351 
2352 	mutex_init(&display->fbc.sys_cache.lock);
2353 	display->fbc.sys_cache.id = FBC_SYS_CACHE_ID_NONE;
2354 }
2355 
2356 /**
2357  * intel_fbc_sanitize - Sanitize FBC
2358  * @display: display
2359  *
2360  * Make sure FBC is initially disabled since we have no
2361  * idea eg. into which parts of stolen it might be scribbling
2362  * into.
2363  */
2364 void intel_fbc_sanitize(struct intel_display *display)
2365 {
2366 	struct intel_fbc *fbc;
2367 	enum intel_fbc_id fbc_id;
2368 
2369 	for_each_intel_fbc(display, fbc, fbc_id) {
2370 		if (intel_fbc_hw_is_active(fbc))
2371 			intel_fbc_hw_deactivate(fbc);
2372 	}
2373 
2374 	/* Ensure the sys cache usage config is clear as well */
2375 	mutex_lock(&display->fbc.sys_cache.lock);
2376 	fbc_sys_cache_update_config(display, 0, FBC_SYS_CACHE_ID_NONE);
2377 	mutex_unlock(&display->fbc.sys_cache.lock);
2378 }
2379 
/* debugfs: dump the current status of this FBC instance and its planes. */
static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_fbc *fbc = m->private;
	struct intel_display *display = fbc->display;
	struct intel_plane *plane;
	struct ref_tracker *wakeref;

	/* Lock order: modeset locks, rpm wakeref, then fbc->lock. */
	drm_modeset_lock_all(display->drm);

	wakeref = intel_display_rpm_get(display);
	mutex_lock(&fbc->lock);

	if (fbc->active) {
		seq_puts(m, "FBC enabled\n");
		seq_printf(m, "Compressing: %s\n",
			   str_yes_no(intel_fbc_is_compressing(fbc)));

		mutex_lock(&display->fbc.sys_cache.lock);
		seq_printf(m, "Using system cache: %s\n",
			   str_yes_no(display->fbc.sys_cache.id == fbc->id));
		mutex_unlock(&display->fbc.sys_cache.lock);
	} else {
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
	}

	/* '*' marks the plane currently owning this FBC instance. */
	for_each_intel_plane(display->drm, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane->fbc != fbc)
			continue;

		seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
			   fbc->state.plane == plane ? '*' : ' ',
			   plane->base.base.id, plane->base.name,
			   plane_state->no_fbc_reason ?: "FBC possible");
	}

	mutex_unlock(&fbc->lock);
	intel_display_rpm_put(display, wakeref);

	drm_modeset_unlock_all(display->drm);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);
2427 
/* debugfs getter for the FBC false color setting. */
static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
{
	struct intel_fbc *fbc = data;

	*val = fbc->false_color;

	return 0;
}
2436 
/* debugfs setter: store false color and program it while FBC is active. */
static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
{
	struct intel_fbc *fbc = data;

	mutex_lock(&fbc->lock);

	fbc->false_color = val;

	/*
	 * Only program the hw when active; otherwise the stored value is
	 * presumably picked up on the next activation — confirm.
	 */
	if (fbc->active)
		fbc->funcs->set_false_color(fbc, fbc->false_color);

	mutex_unlock(&fbc->lock);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
			 intel_fbc_debugfs_false_color_get,
			 intel_fbc_debugfs_false_color_set,
			 "%llu\n");
2457 
2458 static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
2459 				  struct dentry *parent)
2460 {
2461 	debugfs_create_file("i915_fbc_status", 0444, parent,
2462 			    fbc, &intel_fbc_debugfs_status_fops);
2463 
2464 	if (fbc->funcs->set_false_color)
2465 		debugfs_create_file_unsafe("i915_fbc_false_color", 0644, parent,
2466 					   fbc, &intel_fbc_debugfs_false_color_fops);
2467 }
2468 
2469 void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
2470 {
2471 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2472 
2473 	if (plane->fbc)
2474 		intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry);
2475 }
2476 
2477 /* FIXME: remove this once igt is on board with per-crtc stuff */
2478 void intel_fbc_debugfs_register(struct intel_display *display)
2479 {
2480 	struct intel_fbc *fbc;
2481 
2482 	fbc = display->fbc.instances[INTEL_FBC_A];
2483 	if (fbc)
2484 		intel_fbc_debugfs_add(fbc, display->drm->debugfs_root);
2485 }
2486