// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/dma-resv.h>
#include <linux/dma-fence-chain.h>
#include <linux/export.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "drm_internal.h"
/**
 * DOC: overview
 *
 * The GEM atomic helpers library implements generic atomic-commit
 * functions for drivers that use GEM objects. Currently, it provides
 * synchronization helpers, and plane state and framebuffer BO mappings
 * for planes with shadow buffers.
 *
 * Before scanout, a plane's framebuffer needs to be synchronized with
 * possible writers that draw into the framebuffer. All drivers should
 * call drm_gem_plane_helper_prepare_fb() from their implementation of
 * struct &drm_plane_helper_funcs.prepare_fb. It sets the plane's fence from
 * the framebuffer so that the DRM core can synchronize access automatically.
 * drm_gem_plane_helper_prepare_fb() can also be used directly as an
 * implementation of prepare_fb.
 *
 * .. code-block:: c
 *
 *	#include <drm/drm_gem_atomic_helper.h>
 *
 *	struct drm_plane_helper_funcs driver_plane_helper_funcs = {
 *		...,
 *		.prepare_fb = drm_gem_plane_helper_prepare_fb,
 *	};
 *
 * A driver using a shadow buffer copies the content of the shadow buffers
 * into the HW's framebuffer memory during an atomic update. This requires
 * a mapping of the shadow buffer into kernel address space. The mappings
 * cannot be established by commit-tail functions, such as atomic_update,
 * as this would violate locking rules around dma_buf_vmap().
 *
 * The helpers for shadow-buffered planes establish and release mappings,
 * and provide struct drm_shadow_plane_state, which stores the plane's mapping
 * for commit-tail functions.
 *
 * Shadow-buffered planes can easily be enabled by using the provided macros
 * %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS.
 * These macros set up the plane and plane-helper callbacks to point to the
 * shadow-buffer helpers.
 *
 * .. code-block:: c
 *
 *	#include <drm/drm_gem_atomic_helper.h>
 *
 *	struct drm_plane_funcs driver_plane_funcs = {
 *		...,
 *		DRM_GEM_SHADOW_PLANE_FUNCS,
 *	};
 *
 *	struct drm_plane_helper_funcs driver_plane_helper_funcs = {
 *		...,
 *		DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
 *	};
 *
 * In the driver's atomic-update function, shadow-buffer mappings are available
 * from the plane state. Use to_drm_shadow_plane_state() to upcast from
 * struct drm_plane_state.
 *
 * .. code-block:: c
 *
 *	void driver_plane_atomic_update(struct drm_plane *plane,
 *					struct drm_plane_state *old_plane_state)
 *	{
 *		struct drm_plane_state *plane_state = plane->state;
 *		struct drm_shadow_plane_state *shadow_plane_state =
 *			to_drm_shadow_plane_state(plane_state);
 *
 *		// access shadow buffer via shadow_plane_state->map
 *	}
 *
 * A mapping address for each of the framebuffer's buffer objects is stored in
 * struct &drm_shadow_plane_state.map. The mappings are valid while the state
 * is being used.
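 *
 * For example, an atomic_update implementation may copy the first color
 * plane's shadow buffer into hardware memory. This is a minimal sketch
 * that continues the example above; driver_blit() and the hw pointer are
 * hypothetical driver internals.
 *
 * .. code-block:: c
 *
 *	struct drm_framebuffer *fb = plane_state->fb;
 *	struct iosys_map *map = &shadow_plane_state->map[0];
 *
 *	if (!iosys_map_is_null(map))
 *		driver_blit(hw->vaddr, map, fb->pitches[0]);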
 *
 * Drivers that use struct drm_simple_display_pipe can use
 * %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the
 * respective callbacks. Access to shadow-buffer mappings is similar to
 * regular atomic_update.
 *
 * .. code-block:: c
 *
 *	struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
 *		...,
 *		DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
 *	};
 *
 *	void driver_pipe_enable(struct drm_simple_display_pipe *pipe,
 *				struct drm_crtc_state *crtc_state,
 *				struct drm_plane_state *plane_state)
 *	{
 *		struct drm_shadow_plane_state *shadow_plane_state =
 *			to_drm_shadow_plane_state(plane_state);
 *
 *		// access shadow buffer via shadow_plane_state->map
 *	}
 */

/*
 * Plane Helpers
 */

/**
 * drm_gem_plane_helper_prepare_fb() - Prepare a GEM-backed framebuffer
 * @plane: Plane
 * @state: Plane state the fence will be attached to
 *
 * This function extracts the exclusive fence from &drm_gem_object.resv and
 * attaches it to the plane state for the atomic helper to wait on. This is
 * necessary to correctly implement implicit synchronization for any buffers
 * shared as a struct &dma_buf. This function can be used as the
 * &drm_plane_helper_funcs.prepare_fb callback.
 *
 * There is no need for a &drm_plane_helper_funcs.cleanup_fb hook for simple
 * GEM-based framebuffer drivers which have their buffers always pinned in
 * memory.
 *
 * This function is the default implementation for GEM drivers of
 * &drm_plane_helper_funcs.prepare_fb if no callback is provided.
 */
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct dma_fence *fence = dma_fence_get(state->fence);
	enum dma_resv_usage usage;
	size_t i;
	int ret;

	if (!state->fb)
		return 0;

	/*
	 * Only add the kernel fences here if there is already a fence set via
	 * explicit fencing interfaces on the atomic ioctl.
	 *
	 * This way explicit fencing can be used to overrule implicit fencing,
	 * which is important to make explicit fencing use-cases work: One
	 * example is using one buffer for 2 screens with different refresh
	 * rates. Implicit fencing will clamp rendering to the refresh rate of
	 * the slower screen, whereas explicit fencing allows 2 independent
	 * render and display loops on a single buffer. If a driver obeys both
	 * implicit and explicit fences for plane updates, then it will break
	 * all the benefits of explicit fencing.
	 */
	usage = fence ? DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_WRITE;

	for (i = 0; i < state->fb->format->num_planes; ++i) {
		struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i);
		struct dma_fence *new;

		if (!obj) {
			ret = -EINVAL;
			goto error;
		}

		ret = dma_resv_get_singleton(obj->resv, usage, &new);
		if (ret)
			goto error;

		if (new && fence) {
			struct dma_fence_chain *chain = dma_fence_chain_alloc();

			if (!chain) {
				ret = -ENOMEM;
				goto error;
			}

			dma_fence_chain_init(chain, fence, new, 1);
			fence = &chain->base;

		} else if (new) {
			fence = new;
		}
	}

	dma_fence_put(state->fence);
	state->fence = fence;
	return 0;

error:
	dma_fence_put(fence);
	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);

/*
 * Shadow-buffered Planes
 */

/**
 * __drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @plane: the plane
 * @new_shadow_plane_state: the new shadow-buffered plane state
 *
 * This function duplicates shadow-buffered plane state. This is helpful for drivers
 * that subclass struct drm_shadow_plane_state.
 *
 * The function does not duplicate existing mappings of the shadow buffers.
 * Mappings are maintained during the atomic commit by the plane's begin_fb_access
 * and end_fb_access helpers. See drm_gem_begin_shadow_fb_access() and
 * drm_gem_end_shadow_fb_access() for the corresponding helpers.
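 *
 * A driver that embeds struct drm_shadow_plane_state into its own plane
 * state can forward to this helper from its atomic_duplicate_state
 * callback. This is a minimal sketch under that assumption; struct
 * driver_plane_state and its base field are hypothetical.
 *
 * .. code-block:: c
 *
 *	struct driver_plane_state {
 *		struct drm_shadow_plane_state base;
 *		// driver-private fields
 *	};
 *
 *	struct drm_plane_state *
 *	driver_plane_atomic_duplicate_state(struct drm_plane *plane)
 *	{
 *		struct driver_plane_state *new_state;
 *
 *		if (!plane->state)
 *			return NULL;
 *
 *		new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
 *		if (!new_state)
 *			return NULL;
 *		__drm_gem_duplicate_shadow_plane_state(plane, &new_state->base);
 *
 *		return &new_state->base.base;
 *	}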
 */
void
__drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
				       struct drm_shadow_plane_state *new_shadow_plane_state)
{
	struct drm_plane_state *plane_state = plane->state;
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(plane_state);

	__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);

	drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
				   &shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);

/**
 * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @plane: the plane
 *
 * This function implements struct &drm_plane_funcs.atomic_duplicate_state for
 * shadow-buffered planes. It assumes the existing state to be of type
 * struct drm_shadow_plane_state and it allocates the new state to be of this
 * type.
 *
 * The function does not duplicate existing mappings of the shadow buffers.
 * Mappings are maintained during the atomic commit by the plane's begin_fb_access
 * and end_fb_access helpers. See drm_gem_begin_shadow_fb_access() and
 * drm_gem_end_shadow_fb_access() for the corresponding helpers.
 *
 * Returns:
 * A pointer to a new plane state on success, or NULL otherwise.
 */
struct drm_plane_state *
drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane)
{
	struct drm_plane_state *plane_state = plane->state;
	struct drm_shadow_plane_state *new_shadow_plane_state;

	if (!plane_state)
		return NULL;

	new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL);
	if (!new_shadow_plane_state)
		return NULL;
	__drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);

	return &new_shadow_plane_state->base;
}
EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);

/**
 * __drm_gem_destroy_shadow_plane_state - cleans up shadow-buffered plane state
 * @shadow_plane_state: the shadow-buffered plane state
 *
 * This function cleans up shadow-buffered plane state. Helpful for drivers that
 * subclass struct drm_shadow_plane_state.
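 *
 * A subclassing driver can call this helper from its own
 * atomic_destroy_state callback before freeing the subclass. This is a
 * minimal sketch; struct driver_plane_state and to_driver_plane_state()
 * are hypothetical.
 *
 * .. code-block:: c
 *
 *	void driver_plane_atomic_destroy_state(struct drm_plane *plane,
 *					       struct drm_plane_state *plane_state)
 *	{
 *		struct driver_plane_state *state =
 *			to_driver_plane_state(plane_state);
 *
 *		__drm_gem_destroy_shadow_plane_state(&state->base);
 *		kfree(state);
 *	}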
 */
void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state)
{
	drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state);
	__drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
}
EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state);

/**
 * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_funcs.atomic_destroy_state
 * for shadow-buffered planes. It expects that mappings of shadow buffers
 * have been released already.
 */
void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
					struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(plane_state);

	__drm_gem_destroy_shadow_plane_state(shadow_plane_state);
	kfree(shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);

/**
 * __drm_gem_reset_shadow_plane - resets a shadow-buffered plane
 * @plane: the plane
 * @shadow_plane_state: the shadow-buffered plane state
 *
 * This function resets state for shadow-buffered planes. Helpful
 * for drivers that subclass struct drm_shadow_plane_state.
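 *
 * A subclassing driver can call this helper from its own reset callback.
 * This is a minimal sketch; struct driver_plane_state is hypothetical and
 * driver_plane_atomic_destroy_state() refers to the driver's own destroy
 * callback. Passing NULL is safe if the allocation fails, as shown below.
 *
 * .. code-block:: c
 *
 *	void driver_plane_reset(struct drm_plane *plane)
 *	{
 *		struct driver_plane_state *new_state;
 *
 *		if (plane->state) {
 *			driver_plane_atomic_destroy_state(plane, plane->state);
 *			plane->state = NULL;
 *		}
 *
 *		new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
 *		__drm_gem_reset_shadow_plane(plane,
 *					     new_state ? &new_state->base : NULL);
 *	}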
 */
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
				  struct drm_shadow_plane_state *shadow_plane_state)
{
	if (shadow_plane_state) {
		__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
		drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
	} else {
		__drm_atomic_helper_plane_reset(plane, NULL);
	}
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);

/**
 * drm_gem_reset_shadow_plane - resets a shadow-buffered plane
 * @plane: the plane
 *
 * This function implements struct &drm_plane_funcs.reset for
 * shadow-buffered planes. It assumes the current plane state to be
 * of type struct drm_shadow_plane_state and it allocates the new state
 * of this type.
 */
void drm_gem_reset_shadow_plane(struct drm_plane *plane)
{
	struct drm_shadow_plane_state *shadow_plane_state;

	if (plane->state) {
		drm_gem_destroy_shadow_plane_state(plane, plane->state);
		plane->state = NULL; /* must be set to NULL here */
	}

	shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
	if (!shadow_plane_state)
		return;
	__drm_gem_reset_shadow_plane(plane, shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_reset_shadow_plane);

/**
 * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It
 * maps all buffer objects of the plane's framebuffer into kernel address
 * space and stores them in struct &drm_shadow_plane_state.map. The first data
 * bytes are available in struct &drm_shadow_plane_state.data.
 *
 * See drm_gem_end_shadow_fb_access() for cleanup.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
	struct drm_framebuffer *fb = plane_state->fb;

	if (!fb)
		return 0;

	return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
}
EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access);

/**
 * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_helper_funcs.end_fb_access. It
 * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order.
 *
 * See drm_gem_begin_shadow_fb_access() for more information.
 */
void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
	struct drm_framebuffer *fb = plane_state->fb;

	if (!fb)
		return;

	drm_gem_fb_vunmap(fb, shadow_plane_state->map);
}
EXPORT_SYMBOL(drm_gem_end_shadow_fb_access);

/**
 * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.begin_fb_access.
 *
 * See drm_gem_begin_shadow_fb_access() for details and
 * drm_gem_simple_kms_end_shadow_fb_access() for cleanup.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
					      struct drm_plane_state *plane_state)
{
	return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access);

/**
 * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.end_fb_access.
 * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in
 * reverse order.
 *
 * See drm_gem_simple_kms_begin_shadow_fb_access() for more information.
 */
void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
					     struct drm_plane_state *plane_state)
{
	drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access);

/**
 * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
 * @pipe: the simple display pipe
 *
 * This function implements struct drm_simple_display_pipe_funcs.reset_plane
 * for shadow-buffered planes.
 */
void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe)
{
	drm_gem_reset_shadow_plane(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane);

/**
 * drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @pipe: the simple display pipe
 *
 * This function implements struct drm_simple_display_pipe_funcs.duplicate_plane_state
 * for shadow-buffered planes. It does not duplicate existing mappings of the shadow
 * buffers. Mappings are maintained during the atomic commit by the plane's
 * begin_fb_access and end_fb_access helpers.
 *
 * Returns:
 * A pointer to a new plane state on success, or NULL otherwise.
 */
struct drm_plane_state *
drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe)
{
	return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state);

/**
 * drm_gem_simple_kms_destroy_shadow_plane_state - destroys shadow-buffered plane state
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.destroy_plane_state
 * for shadow-buffered planes. It expects that mappings of shadow buffers
 * have been released already.
 */
void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
						   struct drm_plane_state *plane_state)
{
	drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state);