xref: /linux/drivers/gpu/drm/i915/display/intel_frontbuffer.h (revision 13c072b8e91a5ccb5855ca1ba6fe3ea467dbf94d)
1 /*
2  * Copyright (c) 2014-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #ifndef __INTEL_FRONTBUFFER_H__
25 #define __INTEL_FRONTBUFFER_H__
26 
27 #include <linux/atomic.h>
28 #include <linux/bits.h>
29 #include <linux/workqueue_types.h>
30 
31 struct drm_device;
32 struct drm_gem_object;
33 struct intel_display;
34 
/*
 * Origin of a frontbuffer invalidate/flush operation, passed to
 * __intel_frontbuffer_invalidate()/__intel_frontbuffer_flush() below so the
 * consumers (fbc/psr/drrs — see intel_frontbuffer.c) can tell who dirtied the
 * frontbuffer. Value order is relied upon implicitly elsewhere — do not
 * reorder without auditing users.
 */
enum fb_op_origin {
	ORIGIN_CPU = 0,		/* direct CPU access (e.g. via a mapping) */
	ORIGIN_CS,		/* GPU command streamer rendering */
	ORIGIN_FLIP,		/* page flip */
	ORIGIN_DIRTYFB,		/* DIRTYFB ioctl from userspace */
	ORIGIN_CURSOR_UPDATE,	/* cursor plane update */
};
42 
/**
 * struct intel_frontbuffer - per-GEM-object frontbuffer tracking state
 * @display: display device this frontbuffer belongs to
 * @bits: INTEL_FRONTBUFFER() bitmask of planes currently scanning this
 *	object out; read atomically by the inline invalidate/flush helpers
 *	below
 * @flush_work: deferred flush work, presumably queued by
 *	intel_frontbuffer_queue_flush() — see intel_frontbuffer.c
 */
struct intel_frontbuffer {
	struct intel_display *display;
	atomic_t bits;
	struct work_struct flush_work;
};
48 
49 /*
50  * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
51  * considered to be the frontbuffer for the given plane interface-wise. This
52  * doesn't mean that the hw necessarily already scans it out, but that any
53  * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
54  *
55  * We have one bit per pipe and per scanout plane type.
56  */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8

/*
 * Bit for a given (pipe, plane_id) pair. Note: no trailing semicolon — this
 * macro must expand to a plain expression so it can be used in bitmask
 * arithmetic (x |= INTEL_FRONTBUFFER(...)) and in conditions.
 */
#define INTEL_FRONTBUFFER(pipe, plane_id) \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
/* The overlay plane uses the topmost bit of the pipe's 8-bit group. */
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
/* All 8 frontbuffer bits belonging to @pipe. */
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1,	\
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
65 
/*
 * Flush handling for a completed plane flip covering @frontbuffer_bits —
 * implemented in intel_frontbuffer.c; exact semantics documented there.
 */
void intel_frontbuffer_flip(struct intel_display *display,
			    unsigned frontbuffer_bits);

/*
 * Out-of-line worker for intel_frontbuffer_invalidate() below; only called
 * once the inline wrapper has established that @frontbuffer_bits != 0.
 */
void __intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
				    enum fb_op_origin origin,
				    unsigned int frontbuffer_bits);
72 
73 /**
74  * intel_frontbuffer_invalidate - invalidate frontbuffer object
75  * @front: GEM object to invalidate
76  * @origin: which operation caused the invalidation
77  *
78  * This function gets called every time rendering on the given object starts and
79  * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
80  * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
81  * until the rendering completes or a flip on this frontbuffer plane is
82  * scheduled.
83  */
84 static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
85 						enum fb_op_origin origin)
86 {
87 	unsigned int frontbuffer_bits;
88 
89 	if (!front)
90 		return false;
91 
92 	frontbuffer_bits = atomic_read(&front->bits);
93 	if (!frontbuffer_bits)
94 		return false;
95 
96 	__intel_frontbuffer_invalidate(front, origin, frontbuffer_bits);
97 	return true;
98 }
99 
/*
 * Out-of-line worker for intel_frontbuffer_flush() below; only called once
 * the inline wrapper has established that @frontbuffer_bits != 0.
 */
void __intel_frontbuffer_flush(struct intel_frontbuffer *front,
			       enum fb_op_origin origin,
			       unsigned int frontbuffer_bits);
103 
104 /**
105  * intel_frontbuffer_flush - flush frontbuffer object
106  * @front: GEM object to flush
107  * @origin: which operation caused the flush
108  *
109  * This function gets called every time rendering on the given object has
110  * completed and frontbuffer caching can be started again.
111  */
112 static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
113 					   enum fb_op_origin origin)
114 {
115 	unsigned int frontbuffer_bits;
116 
117 	if (!front)
118 		return;
119 
120 	frontbuffer_bits = atomic_read(&front->bits);
121 	if (!frontbuffer_bits)
122 		return;
123 
124 	__intel_frontbuffer_flush(front, origin, frontbuffer_bits);
125 }
126 
/*
 * Schedule a deferred flush of @front — presumably via front->flush_work;
 * see intel_frontbuffer.c for the exact semantics.
 */
void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front);

/*
 * Transfer frontbuffer tracking bits between buffers, e.g. on plane updates.
 * NOTE(review): looks like either @old or @new may be NULL (track start/stop)
 * — confirm against the definition in intel_frontbuffer.c.
 */
void intel_frontbuffer_track(struct intel_frontbuffer *old,
			     struct intel_frontbuffer *new,
			     unsigned int frontbuffer_bits);

/* Constructor/destructor pair for an embedded struct intel_frontbuffer. */
void intel_frontbuffer_init(struct intel_frontbuffer *front, struct drm_device *drm);
void intel_frontbuffer_fini(struct intel_frontbuffer *front);
135 
136 #endif /* __INTEL_FRONTBUFFER_H__ */
137