xref: /linux/drivers/gpu/drm/i915/display/intel_frontbuffer.h (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
1 /*
2  * Copyright (c) 2014-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #ifndef __INTEL_FRONTBUFFER_H__
25 #define __INTEL_FRONTBUFFER_H__
26 
27 #include <linux/atomic.h>
28 #include <linux/bits.h>
29 #include <linux/kref.h>
30 
31 #include "gem/i915_gem_object_types.h"
32 #include "i915_active_types.h"
33 
34 struct drm_i915_private;
35 
/*
 * Which operation caused a frontbuffer invalidation or flush (see
 * intel_frontbuffer_invalidate()/intel_frontbuffer_flush() below).
 */
enum fb_op_origin {
	ORIGIN_CPU = 0,		/* direct CPU access, presumably mmap writes — confirm in callers */
	ORIGIN_CS,		/* GPU command streamer rendering (delayed handling, see kernel-doc below) */
	ORIGIN_FLIP,		/* plane flip */
	ORIGIN_DIRTYFB,		/* presumably the DIRTYFB ioctl — confirm in callers */
	ORIGIN_CURSOR_UPDATE,	/* cursor plane update */
};
43 
/*
 * Per-object frontbuffer tracking state. Reference counted via @ref and
 * freed after an RCU grace period (@rcu), so obj->frontbuffer may be
 * dereferenced under rcu_read_lock() — see __intel_frontbuffer_get().
 */
struct intel_frontbuffer {
	struct kref ref;	/* dropped via intel_frontbuffer_put() */
	atomic_t bits;		/* INTEL_FRONTBUFFER() bits this object currently backs */
	struct i915_active write;	/* NOTE(review): presumably tracks pending GPU writes — confirm in intel_frontbuffer.c */
	struct drm_i915_gem_object *obj;	/* backing GEM object */
	struct rcu_head rcu;	/* deferred free of this struct */
};
51 
52 /*
53  * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
54  * considered to be the frontbuffer for the given plane interface-wise. This
55  * doesn't mean that the hw necessarily already scans it out, but that any
56  * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
57  *
58  * We have one bit per pipe and per scanout plane type.
59  */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
/*
 * Frontbuffer bit for a given plane on a given pipe. No trailing
 * semicolon: the expansion must be usable inside expressions such as
 * "bits |= INTEL_FRONTBUFFER(pipe, plane) | other" (the stray ';' the
 * previous version carried made any such use a syntax error).
 */
#define INTEL_FRONTBUFFER(pipe, plane_id) \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
/* The overlay plane uses the last bit of the pipe's range. */
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
/* All frontbuffer bits belonging to one pipe. */
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1,	\
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
68 
/*
 * Frontbuffer tracking around plane flips; implemented in
 * intel_frontbuffer.c. @frontbuffer_bits is a mask built from the
 * INTEL_FRONTBUFFER*() macros above.
 */
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
				    unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
				     unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_i915_private *i915,
			    unsigned frontbuffer_bits);

/* Drop a reference obtained from intel_frontbuffer_get()/__intel_frontbuffer_get(). */
void intel_frontbuffer_put(struct intel_frontbuffer *front);
77 
/*
 * Acquire a reference to @obj's frontbuffer, if it has one.
 *
 * Returns the frontbuffer with an elevated kref, or NULL if the object
 * has no frontbuffer. The caller must release the reference with
 * intel_frontbuffer_put(). Lock-free: races against concurrent
 * teardown/replacement of obj->frontbuffer are resolved by retrying.
 */
static inline struct intel_frontbuffer *
__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
{
	struct intel_frontbuffer *front;

	/* Fast path: most objects are never a frontbuffer. */
	if (likely(!rcu_access_pointer(obj->frontbuffer)))
		return NULL;

	rcu_read_lock();
	do {
		front = rcu_dereference(obj->frontbuffer);
		if (!front)
			break;

		/*
		 * Refcount already hit zero: the final put is in flight.
		 * Retry — we'll observe either a replacement or NULL.
		 */
		if (unlikely(!kref_get_unless_zero(&front->ref)))
			continue;

		/* Got a ref; confirm the pointer didn't change meanwhile. */
		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
			break;

		/* Pointer changed under us — drop the stale ref and retry. */
		intel_frontbuffer_put(front);
	} while (1);
	rcu_read_unlock();

	return front;
}
104 
/*
 * Acquire a reference to @obj's frontbuffer; presumably allocates one on
 * first use — confirm in intel_frontbuffer.c. Release with
 * intel_frontbuffer_put().
 */
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);

/* Out-of-line worker for intel_frontbuffer_invalidate() below. */
void __intel_fb_invalidate(struct intel_frontbuffer *front,
			   enum fb_op_origin origin,
			   unsigned int frontbuffer_bits);
111 
112 /**
113  * intel_frontbuffer_invalidate - invalidate frontbuffer object
114  * @front: GEM object to invalidate
115  * @origin: which operation caused the invalidation
116  *
117  * This function gets called every time rendering on the given object starts and
118  * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
119  * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
120  * until the rendering completes or a flip on this frontbuffer plane is
121  * scheduled.
122  */
123 static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
124 						enum fb_op_origin origin)
125 {
126 	unsigned int frontbuffer_bits;
127 
128 	if (!front)
129 		return false;
130 
131 	frontbuffer_bits = atomic_read(&front->bits);
132 	if (!frontbuffer_bits)
133 		return false;
134 
135 	__intel_fb_invalidate(front, origin, frontbuffer_bits);
136 	return true;
137 }
138 
/* Out-of-line worker for intel_frontbuffer_flush() below. */
void __intel_fb_flush(struct intel_frontbuffer *front,
		      enum fb_op_origin origin,
		      unsigned int frontbuffer_bits);
142 
143 /**
144  * intel_frontbuffer_flush - flush frontbuffer object
145  * @front: GEM object to flush
146  * @origin: which operation caused the flush
147  *
148  * This function gets called every time rendering on the given object has
149  * completed and frontbuffer caching can be started again.
150  */
151 static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
152 					   enum fb_op_origin origin)
153 {
154 	unsigned int frontbuffer_bits;
155 
156 	if (!front)
157 		return;
158 
159 	frontbuffer_bits = atomic_read(&front->bits);
160 	if (!frontbuffer_bits)
161 		return;
162 
163 	__intel_fb_flush(front, origin, frontbuffer_bits);
164 }
165 
/*
 * Move @frontbuffer_bits from @old to @new tracking state; presumably
 * either may be NULL for the plain set/clear cases — confirm in
 * intel_frontbuffer.c.
 */
void intel_frontbuffer_track(struct intel_frontbuffer *old,
			     struct intel_frontbuffer *new,
			     unsigned int frontbuffer_bits);
169 
170 #endif /* __INTEL_FRONTBUFFER_H__ */
171