xref: /linux/drivers/gpu/drm/drm_gem_atomic_helper.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 #include <linux/dma-resv.h>
4 #include <linux/dma-fence-chain.h>
5 #include <linux/export.h>
6 
7 #include <drm/drm_atomic_state_helper.h>
8 #include <drm/drm_atomic_uapi.h>
9 #include <drm/drm_framebuffer.h>
10 #include <drm/drm_gem.h>
11 #include <drm/drm_gem_atomic_helper.h>
12 #include <drm/drm_gem_framebuffer_helper.h>
13 #include <drm/drm_simple_kms_helper.h>
14 
15 #include "drm_internal.h"
16 
17 /**
18  * DOC: overview
19  *
20  * The GEM atomic helpers library implements generic atomic-commit
21  * functions for drivers that use GEM objects. Currently, it provides
22  * synchronization helpers, and plane state and framebuffer BO mappings
23  * for planes with shadow buffers.
24  *
25  * Before scanout, a plane's framebuffer needs to be synchronized with
26  * possible writers that draw into the framebuffer. All drivers should
27  * call drm_gem_plane_helper_prepare_fb() from their implementation of
28  * struct &drm_plane_helper_funcs.prepare_fb. It sets the plane's fence from
29  * the framebuffer so that the DRM core can synchronize access automatically.
30  * drm_gem_plane_helper_prepare_fb() can also be used directly as
31  * implementation of prepare_fb.
32  *
33  * .. code-block:: c
34  *
35  *	#include <drm/drm_gem_atomic_helper.h>
36  *
37  *	struct drm_plane_helper_funcs driver_plane_helper_funcs = {
38  *		...,
39  *		.prepare_fb = drm_gem_plane_helper_prepare_fb,
40  *	};
41  *
42  * A driver using a shadow buffer copies the content of the shadow buffers
43  * into the HW's framebuffer memory during an atomic update. This requires
44  * a mapping of the shadow buffer into kernel address space. The mappings
45  * cannot be established by commit-tail functions, such as atomic_update,
46  * as this would violate locking rules around dma_buf_vmap().
47  *
48  * The helpers for shadow-buffered planes establish and release mappings,
49  * and provide struct drm_shadow_plane_state, which stores the plane's mapping
50  * for commit-tail functions.
51  *
52  * Shadow-buffered planes can easily be enabled by using the provided macros
53  * %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS.
54  * These macros set up the plane and plane-helper callbacks to point to the
55  * shadow-buffer helpers.
56  *
57  * .. code-block:: c
58  *
59  *	#include <drm/drm_gem_atomic_helper.h>
60  *
61  *	struct drm_plane_funcs driver_plane_funcs = {
62  *		...,
63  *		DRM_GEM_SHADOW_PLANE_FUNCS,
64  *	};
65  *
66  *	struct drm_plane_helper_funcs driver_plane_helper_funcs = {
67  *		...,
68  *		DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
69  *	};
70  *
71  * In the driver's atomic-update function, shadow-buffer mappings are available
72  * from the plane state. Use to_drm_shadow_plane_state() to upcast from
73  * struct drm_plane_state.
74  *
75  * .. code-block:: c
76  *
77  *	void driver_plane_atomic_update(struct drm_plane *plane,
78  *					struct drm_plane_state *old_plane_state)
79  *	{
80  *		struct drm_plane_state *plane_state = plane->state;
81  *		struct drm_shadow_plane_state *shadow_plane_state =
82  *			to_drm_shadow_plane_state(plane_state);
83  *
84  *		// access shadow buffer via shadow_plane_state->map
85  *	}
86  *
87  * A mapping address for each of the framebuffer's buffer objects is stored in
88  * struct &drm_shadow_plane_state.map. The mappings are valid while the state
89  * is being used.
90  *
91  * Drivers that use struct drm_simple_display_pipe can use
92  * %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the respective
93  * callbacks. Access to shadow-buffer mappings is similar to regular
94  * atomic_update.
95  *
96  * .. code-block:: c
97  *
98  *	struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
99  *		...,
100  *		DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
101  *	};
102  *
103  *	void driver_pipe_enable(struct drm_simple_display_pipe *pipe,
104  *				struct drm_crtc_state *crtc_state,
105  *				struct drm_plane_state *plane_state)
106  *	{
107  *		struct drm_shadow_plane_state *shadow_plane_state =
108  *			to_drm_shadow_plane_state(plane_state);
109  *
110  *		// access shadow buffer via shadow_plane_state->map
111  *	}
112  */
113 
114 /*
115  * Plane Helpers
116  */
117 
118 /**
119  * drm_gem_plane_helper_prepare_fb() - Prepare a GEM backed framebuffer
120  * @plane: Plane
121  * @state: Plane state the fence will be attached to
122  *
123  * This function extracts the exclusive fence from &drm_gem_object.resv and
124  * attaches it to plane state for the atomic helper to wait on. This is
125  * necessary to correctly implement implicit synchronization for any buffers
126  * shared as a struct &dma_buf. This function can be used as the
127  * &drm_plane_helper_funcs.prepare_fb callback.
128  *
129  * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
130  * GEM based framebuffer drivers which have their buffers always pinned in
131  * memory.
132  *
133  * This function is the default implementation for GEM drivers of
134  * &drm_plane_helper_funcs.prepare_fb if no callback is provided.
135  */
136 int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
137 				    struct drm_plane_state *state)
138 {
139 	struct dma_fence *fence = dma_fence_get(state->fence);
140 	enum dma_resv_usage usage;
141 	size_t i;
142 	int ret;
143 
144 	if (!state->fb)
145 		return 0;
146 
147 	/*
148 	 * Only add the kernel fences here if there is already a fence set via
149 	 * explicit fencing interfaces on the atomic ioctl.
150 	 *
151 	 * This way explicit fencing can be used to overrule implicit fencing,
152 	 * which is important to make explicit fencing use-cases work: One
153 	 * example is using one buffer for 2 screens with different refresh
154 	 * rates. Implicit fencing will clamp rendering to the refresh rate of
155 	 * the slower screen, whereas explicit fence allows 2 independent
156 	 * render and display loops on a single buffer. If a driver allows
157 	 * obeys both implicit and explicit fences for plane updates, then it
158 	 * will break all the benefits of explicit fencing.
159 	 */
160 	usage = fence ? DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_WRITE;
161 
162 	for (i = 0; i < state->fb->format->num_planes; ++i) {
163 		struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i);
164 		struct dma_fence *new;
165 
166 		if (!obj) {
167 			ret = -EINVAL;
168 			goto error;
169 		}
170 
171 		ret = dma_resv_get_singleton(obj->resv, usage, &new);
172 		if (ret)
173 			goto error;
174 
175 		if (new && fence) {
176 			struct dma_fence_chain *chain = dma_fence_chain_alloc();
177 
178 			if (!chain) {
179 				ret = -ENOMEM;
180 				goto error;
181 			}
182 
183 			dma_fence_chain_init(chain, fence, new, 1);
184 			fence = &chain->base;
185 
186 		} else if (new) {
187 			fence = new;
188 		}
189 	}
190 
191 	dma_fence_put(state->fence);
192 	state->fence = fence;
193 	return 0;
194 
195 error:
196 	dma_fence_put(fence);
197 	return ret;
198 }
199 EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
200 
201 /*
202  * Shadow-buffered Planes
203  */
204 
205 /**
206  * __drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
207  * @plane: the plane
208  * @new_shadow_plane_state: the new shadow-buffered plane state
209  *
210  * This function duplicates shadow-buffered plane state. This is helpful for drivers
211  * that subclass struct drm_shadow_plane_state.
212  *
213  * The function does not duplicate existing mappings of the shadow buffers.
214  * Mappings are maintained during the atomic commit by the plane's prepare_fb
215  * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb()
216  * for corresponding helpers.
217  */
218 void
219 __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
220 				       struct drm_shadow_plane_state *new_shadow_plane_state)
221 {
222 	struct drm_plane_state *plane_state = plane->state;
223 	struct drm_shadow_plane_state *shadow_plane_state =
224 		to_drm_shadow_plane_state(plane_state);
225 
226 	__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
227 
228 	drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
229 				   &shadow_plane_state->fmtcnv_state);
230 }
231 EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
232 
233 /**
234  * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
235  * @plane: the plane
236  *
237  * This function implements struct &drm_plane_funcs.atomic_duplicate_state for
238  * shadow-buffered planes. It assumes the existing state to be of type
239  * struct drm_shadow_plane_state and it allocates the new state to be of this
240  * type.
241  *
242  * The function does not duplicate existing mappings of the shadow buffers.
243  * Mappings are maintained during the atomic commit by the plane's prepare_fb
244  * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb()
245  * for corresponding helpers.
246  *
247  * Returns:
248  * A pointer to a new plane state on success, or NULL otherwise.
249  */
250 struct drm_plane_state *
251 drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane)
252 {
253 	struct drm_plane_state *plane_state = plane->state;
254 	struct drm_shadow_plane_state *new_shadow_plane_state;
255 
256 	if (!plane_state)
257 		return NULL;
258 
259 	new_shadow_plane_state = kzalloc_obj(*new_shadow_plane_state,
260 					     GFP_KERNEL);
261 	if (!new_shadow_plane_state)
262 		return NULL;
263 	__drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
264 
265 	return &new_shadow_plane_state->base;
266 }
267 EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);
268 
/**
 * __drm_gem_destroy_shadow_plane_state - cleans up shadow-buffered plane state
 * @shadow_plane_state: the shadow-buffered plane state
 *
 * This function cleans up shadow-buffered plane state. Helpful for drivers that
 * subclass struct drm_shadow_plane_state. It expects that mappings of shadow
 * buffers have been released already. The state's memory itself is not freed;
 * that is the caller's responsibility.
 */
void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state)
{
	/* Release the format-conversion scratch state before the base state. */
	drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state);
	__drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
}
EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state);
282 
/**
 * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_funcs.atomic_destroy_state
 * for shadow-buffered planes. It expects that mappings of shadow buffers
 * have been released already.
 */
void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
					struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *state = to_drm_shadow_plane_state(plane_state);

	/* Clean up the embedded state, then free the containing object. */
	__drm_gem_destroy_shadow_plane_state(state);
	kfree(state);
}
EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
302 
303 /**
304  * __drm_gem_reset_shadow_plane - resets a shadow-buffered plane
305  * @plane: the plane
306  * @shadow_plane_state: the shadow-buffered plane state
307  *
308  * This function resets state for shadow-buffered planes. Helpful
309  * for drivers that subclass struct drm_shadow_plane_state.
310  */
311 void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
312 				  struct drm_shadow_plane_state *shadow_plane_state)
313 {
314 	if (shadow_plane_state) {
315 		__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
316 		drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
317 	} else {
318 		__drm_atomic_helper_plane_reset(plane, NULL);
319 	}
320 }
321 EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
322 
323 /**
324  * drm_gem_reset_shadow_plane - resets a shadow-buffered plane
325  * @plane: the plane
326  *
327  * This function implements struct &drm_plane_funcs.reset_plane for
328  * shadow-buffered planes. It assumes the current plane state to be
329  * of type struct drm_shadow_plane and it allocates the new state of
330  * this type.
331  */
332 void drm_gem_reset_shadow_plane(struct drm_plane *plane)
333 {
334 	struct drm_shadow_plane_state *shadow_plane_state;
335 
336 	if (plane->state) {
337 		drm_gem_destroy_shadow_plane_state(plane, plane->state);
338 		plane->state = NULL; /* must be set to NULL here */
339 	}
340 
341 	shadow_plane_state = kzalloc_obj(*shadow_plane_state, GFP_KERNEL);
342 	__drm_gem_reset_shadow_plane(plane, shadow_plane_state);
343 }
344 EXPORT_SYMBOL(drm_gem_reset_shadow_plane);
345 
346 /**
347  * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
348  * @plane: the plane
349  * @plane_state: the plane state of type struct drm_shadow_plane_state
350  *
351  * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It
352  * maps all buffer objects of the plane's framebuffer into kernel address
353  * space and stores them in struct &drm_shadow_plane_state.map. The first data
354  * bytes are available in struct &drm_shadow_plane_state.data.
355  *
356  * See drm_gem_end_shadow_fb_access() for cleanup.
357  *
358  * Returns:
359  * 0 on success, or a negative errno code otherwise.
360  */
361 int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
362 {
363 	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
364 	struct drm_framebuffer *fb = plane_state->fb;
365 
366 	if (!fb)
367 		return 0;
368 
369 	return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
370 }
371 EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access);
372 
373 /**
374  * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access
375  * @plane: the plane
376  * @plane_state: the plane state of type struct drm_shadow_plane_state
377  *
378  * This function implements struct &drm_plane_helper_funcs.end_fb_access. It
379  * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order.
380  *
381  * See drm_gem_begin_shadow_fb_access() for more information.
382  */
383 void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
384 {
385 	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
386 	struct drm_framebuffer *fb = plane_state->fb;
387 
388 	if (!fb)
389 		return;
390 
391 	drm_gem_fb_vunmap(fb, shadow_plane_state->map);
392 }
393 EXPORT_SYMBOL(drm_gem_end_shadow_fb_access);
394 
/**
 * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.begin_fb_access.
 *
 * See drm_gem_begin_shadow_fb_access() for details and
 * drm_gem_simple_kms_end_shadow_fb_access() for cleanup.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
					      struct drm_plane_state *plane_state)
{
	/* Forward to the generic plane helper using the pipe's plane. */
	return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access);
414 
/**
 * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.end_fb_access.
 * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in
 * reverse order.
 *
 * See drm_gem_simple_kms_begin_shadow_fb_access().
 */
void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
					     struct drm_plane_state *plane_state)
{
	/* Forward to the generic plane helper using the pipe's plane. */
	drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access);
432 
/**
 * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
 * @pipe: the simple display pipe
 *
 * This function implements struct drm_simple_display_pipe_funcs.reset_plane
 * for shadow-buffered planes.
 */
void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe)
{
	/* Forward to the generic plane helper using the pipe's plane. */
	drm_gem_reset_shadow_plane(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane);
445 
/**
 * drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @pipe: the simple display pipe
 *
 * This function implements struct drm_simple_display_pipe_funcs.duplicate_plane_state
 * for shadow-buffered planes. It does not duplicate existing mappings of the shadow
 * buffers. Mappings are maintained during the atomic commit by the plane's prepare_fb
 * and cleanup_fb helpers.
 *
 * Returns:
 * A pointer to a new plane state on success, or NULL otherwise.
 */
struct drm_plane_state *
drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe)
{
	/* Forward to the generic plane helper using the pipe's plane. */
	return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state);
464 
/**
 * drm_gem_simple_kms_destroy_shadow_plane_state - destroys shadow-buffered plane state
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.destroy_plane_state
 * for shadow-buffered planes. It expects that mappings of shadow buffers
 * have been released already.
 */
void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
						   struct drm_plane_state *plane_state)
{
	/* Forward to the generic plane helper using the pipe's plane. */
	drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state);
480