// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/dma-resv.h>
#include <linux/dma-fence-chain.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "drm_internal.h"

/**
 * DOC: overview
 *
 * The GEM atomic helpers library implements generic atomic-commit
 * functions for drivers that use GEM objects. Currently, it provides
 * synchronization helpers, and plane state and framebuffer BO mappings
 * for planes with shadow buffers.
 *
 * Before scanout, a plane's framebuffer needs to be synchronized with
 * possible writers that draw into the framebuffer. All drivers should
 * call drm_gem_plane_helper_prepare_fb() from their implementation of
 * struct &drm_plane_helper_funcs.prepare_fb. It sets the plane's fence from
 * the framebuffer so that the DRM core can synchronize access automatically.
 * drm_gem_plane_helper_prepare_fb() can also be used directly as the
 * implementation of prepare_fb.
 *
 * .. code-block:: c
 *
 *	#include <drm/drm_gem_atomic_helper.h>
 *
 *	struct drm_plane_helper_funcs driver_plane_helper_funcs = {
 *		...,
 *		.prepare_fb = drm_gem_plane_helper_prepare_fb,
 *	};
 *
 * A driver using a shadow buffer copies the content of the shadow buffers
 * into the HW's framebuffer memory during an atomic update. This requires
 * a mapping of the shadow buffer into kernel address space. The mappings
 * cannot be established by commit-tail functions, such as atomic_update,
 * as this would violate locking rules around dma_buf_vmap().
 *
 * The helpers for shadow-buffered planes establish and release mappings,
 * and provide struct drm_shadow_plane_state, which stores the plane's mapping
 * for commit-tail functions.
 *
 * Shadow-buffered planes can easily be enabled by using the provided macros
 * %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS.
 * These macros set up the plane and plane-helper callbacks to point to the
 * shadow-buffer helpers.
 *
 * .. code-block:: c
 *
 *	#include <drm/drm_gem_atomic_helper.h>
 *
 *	struct drm_plane_funcs driver_plane_funcs = {
 *		...,
 *		DRM_GEM_SHADOW_PLANE_FUNCS,
 *	};
 *
 *	struct drm_plane_helper_funcs driver_plane_helper_funcs = {
 *		...,
 *		DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
 *	};
 *
 * In the driver's atomic-update function, shadow-buffer mappings are available
 * from the plane state. Use to_drm_shadow_plane_state() to upcast from
 * struct drm_plane_state.
 *
 * .. code-block:: c
 *
 *	void driver_plane_atomic_update(struct drm_plane *plane,
 *					struct drm_plane_state *old_plane_state)
 *	{
 *		struct drm_plane_state *plane_state = plane->state;
 *		struct drm_shadow_plane_state *shadow_plane_state =
 *			to_drm_shadow_plane_state(plane_state);
 *
 *		// access shadow buffer via shadow_plane_state->map
 *	}
 *
 * A mapping address for each of the framebuffer's buffer objects is stored in
 * struct &drm_shadow_plane_state.map. The mappings are valid while the state
 * is being used.
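 *
 * For copying from the shadow buffer into hardware memory, the mapping can
 * be combined with DRM's damage and format helpers. The following is a
 * minimal sketch of such an update, using drm_atomic_helper_damage_merged()
 * from <drm/drm_damage_helper.h> and drm_fb_memcpy() from
 * <drm/drm_format_helper.h>; the destination mapping dst is driver-specific
 * and hypothetical here.
 *
 * .. code-block:: c
 *
 *	// within driver_plane_atomic_update(), continuing the example above
 *	struct drm_rect damage;
 *
 *	if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage))
 *		drm_fb_memcpy(&dst, NULL, shadow_plane_state->data,
 *			      plane_state->fb, &damage);
 *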
 * Drivers that use struct drm_simple_display_pipe can use
 * %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the
 * respective callbacks. Access to shadow-buffer mappings is similar to
 * regular atomic_update.
 *
 * .. code-block:: c
 *
 *	struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
 *		...,
 *		DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
 *	};
 *
 *	void driver_pipe_enable(struct drm_simple_display_pipe *pipe,
 *				struct drm_crtc_state *crtc_state,
 *				struct drm_plane_state *plane_state)
 *	{
 *		struct drm_shadow_plane_state *shadow_plane_state =
 *			to_drm_shadow_plane_state(plane_state);
 *
 *		// access shadow buffer via shadow_plane_state->map
 *	}
 */

/*
 * Plane Helpers
 */

/**
 * drm_gem_plane_helper_prepare_fb() - Prepare a GEM-backed framebuffer
 * @plane: Plane
 * @state: Plane state the fence will be attached to
 *
 * This function extracts the exclusive fence from &drm_gem_object.resv and
 * attaches it to plane state for the atomic helper to wait on. This is
 * necessary to correctly implement implicit synchronization for any buffers
 * shared as a struct &dma_buf. This function can be used as the
 * &drm_plane_helper_funcs.prepare_fb callback.
 *
 * There is no need for a &drm_plane_helper_funcs.cleanup_fb hook for simple
 * GEM-based framebuffer drivers which have their buffers always pinned in
 * memory.
 *
 * This function is the default implementation for GEM drivers of
 * &drm_plane_helper_funcs.prepare_fb if no callback is provided.
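 *
 * Drivers that need additional setup in prepare_fb, such as pinning the
 * framebuffer's buffer objects, can still call this helper for the fencing
 * part. A minimal sketch; driver_pin_fb() is hypothetical and stands in for
 * the driver's own pinning logic:
 *
 * .. code-block:: c
 *
 *	static int driver_plane_helper_prepare_fb(struct drm_plane *plane,
 *						  struct drm_plane_state *state)
 *	{
 *		int ret;
 *
 *		if (!state->fb)
 *			return 0;
 *
 *		ret = driver_pin_fb(state->fb); // driver-specific, hypothetical
 *		if (ret)
 *			return ret;
 *
 *		return drm_gem_plane_helper_prepare_fb(plane, state);
 *	}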
 */
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct dma_fence *fence = dma_fence_get(state->fence);
	enum dma_resv_usage usage;
	size_t i;
	int ret;

	if (!state->fb)
		return 0;

	/*
	 * Only add the kernel fences here if there is already a fence set via
	 * explicit fencing interfaces on the atomic ioctl.
	 *
	 * This way explicit fencing can be used to overrule implicit fencing,
	 * which is important to make explicit fencing use-cases work: One
	 * example is using one buffer for 2 screens with different refresh
	 * rates. Implicit fencing will clamp rendering to the refresh rate of
	 * the slower screen, whereas explicit fencing allows 2 independent
	 * render and display loops on a single buffer. If a driver obeys both
	 * implicit and explicit fences for plane updates, then it will break
	 * all the benefits of explicit fencing.
	 */
	usage = fence ? DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_WRITE;

	for (i = 0; i < state->fb->format->num_planes; ++i) {
		struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i);
		struct dma_fence *new;

		if (!obj) {
			ret = -EINVAL;
			goto error;
		}

		ret = dma_resv_get_singleton(obj->resv, usage, &new);
		if (ret)
			goto error;

		if (new && fence) {
			struct dma_fence_chain *chain = dma_fence_chain_alloc();

			if (!chain) {
				ret = -ENOMEM;
				goto error;
			}

			dma_fence_chain_init(chain, fence, new, 1);
			fence = &chain->base;

		} else if (new) {
			fence = new;
		}
	}

	dma_fence_put(state->fence);
	state->fence = fence;
	return 0;

error:
	dma_fence_put(fence);
	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);

/*
 * Shadow-buffered Planes
 */

/**
 * __drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @plane: the plane
 * @new_shadow_plane_state: the new shadow-buffered plane state
 *
 * This function duplicates shadow-buffered plane state. This is helpful for
 * drivers that subclass struct drm_shadow_plane_state.
 *
 * The function does not duplicate existing mappings of the shadow buffers.
 * Mappings are maintained during the atomic commit by the plane's
 * begin_fb_access and end_fb_access helpers. See drm_gem_begin_shadow_fb_access()
 * and drm_gem_end_shadow_fb_access() for corresponding helpers.
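 *
 * A driver that subclasses struct drm_shadow_plane_state calls this helper
 * from its own atomic_duplicate_state implementation. A minimal sketch,
 * where struct driver_plane_state is a hypothetical subclass:
 *
 * .. code-block:: c
 *
 *	struct driver_plane_state {
 *		struct drm_shadow_plane_state base;
 *		// driver-private fields
 *	};
 *
 *	static struct drm_plane_state *
 *	driver_plane_atomic_duplicate_state(struct drm_plane *plane)
 *	{
 *		struct driver_plane_state *new_state;
 *
 *		if (!plane->state)
 *			return NULL;
 *
 *		new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
 *		if (!new_state)
 *			return NULL;
 *		__drm_gem_duplicate_shadow_plane_state(plane, &new_state->base);
 *		// duplicate driver-private fields here
 *
 *		return &new_state->base.base;
 *	}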
 */
void
__drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
				       struct drm_shadow_plane_state *new_shadow_plane_state)
{
	struct drm_plane_state *plane_state = plane->state;
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(plane_state);

	__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);

	drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
				   &shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);

/**
 * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @plane: the plane
 *
 * This function implements struct &drm_plane_funcs.atomic_duplicate_state for
 * shadow-buffered planes. It assumes the existing state to be of type
 * struct drm_shadow_plane_state and it allocates the new state to be of this
 * type.
 *
 * The function does not duplicate existing mappings of the shadow buffers.
 * Mappings are maintained during the atomic commit by the plane's
 * begin_fb_access and end_fb_access helpers. See drm_gem_begin_shadow_fb_access()
 * and drm_gem_end_shadow_fb_access() for corresponding helpers.
 *
 * Returns:
 * A pointer to a new plane state on success, or NULL otherwise.
 */
struct drm_plane_state *
drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane)
{
	struct drm_plane_state *plane_state = plane->state;
	struct drm_shadow_plane_state *new_shadow_plane_state;

	if (!plane_state)
		return NULL;

	new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL);
	if (!new_shadow_plane_state)
		return NULL;
	__drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);

	return &new_shadow_plane_state->base;
}
EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);

/**
 * __drm_gem_destroy_shadow_plane_state - cleans up shadow-buffered plane state
 * @shadow_plane_state: the shadow-buffered plane state
 *
 * This function cleans up shadow-buffered plane state. This is helpful for
 * drivers that subclass struct drm_shadow_plane_state.
 */
void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state)
{
	drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state);
	__drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
}
EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state);

/**
 * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_funcs.atomic_destroy_state
 * for shadow-buffered planes. It expects that mappings of shadow buffers
 * have been released already.
 */
void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
					struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(plane_state);

	__drm_gem_destroy_shadow_plane_state(shadow_plane_state);
	kfree(shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);

/**
 * __drm_gem_reset_shadow_plane - resets a shadow-buffered plane
 * @plane: the plane
 * @shadow_plane_state: the shadow-buffered plane state
 *
 * This function resets state for shadow-buffered planes. This is helpful
 * for drivers that subclass struct drm_shadow_plane_state.
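 *
 * A subclassing driver resets its plane by destroying any current state and
 * allocating a fresh instance of its own state structure. A minimal sketch,
 * reusing the hypothetical struct driver_plane_state from above, where
 * driver_plane_atomic_destroy_state() is the driver's matching destroy
 * implementation:
 *
 * .. code-block:: c
 *
 *	static void driver_plane_reset(struct drm_plane *plane)
 *	{
 *		struct driver_plane_state *new_state;
 *
 *		if (plane->state) {
 *			driver_plane_atomic_destroy_state(plane, plane->state);
 *			plane->state = NULL; // must be set to NULL here
 *		}
 *
 *		new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
 *		if (!new_state)
 *			return;
 *		__drm_gem_reset_shadow_plane(plane, &new_state->base);
 *	}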
 */
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
				  struct drm_shadow_plane_state *shadow_plane_state)
{
	__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
	drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);

/**
 * drm_gem_reset_shadow_plane - resets a shadow-buffered plane
 * @plane: the plane
 *
 * This function implements struct &drm_plane_funcs.reset for
 * shadow-buffered planes. It assumes the current plane state to be
 * of type struct drm_shadow_plane_state and it allocates the new state
 * of this type.
 */
void drm_gem_reset_shadow_plane(struct drm_plane *plane)
{
	struct drm_shadow_plane_state *shadow_plane_state;

	if (plane->state) {
		drm_gem_destroy_shadow_plane_state(plane, plane->state);
		plane->state = NULL; /* must be set to NULL here */
	}

	shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
	if (!shadow_plane_state)
		return;
	__drm_gem_reset_shadow_plane(plane, shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_reset_shadow_plane);

/**
 * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It
 * maps all buffer objects of the plane's framebuffer into kernel address
 * space and stores them in struct &drm_shadow_plane_state.map. The first data
 * bytes are available in struct &drm_shadow_plane_state.data.
 *
 * See drm_gem_end_shadow_fb_access() for cleanup.
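 *
 * Drivers that do not use %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS can set the
 * callbacks individually, which is roughly what the macro sets up:
 *
 * .. code-block:: c
 *
 *	struct drm_plane_helper_funcs driver_plane_helper_funcs = {
 *		...,
 *		.begin_fb_access = drm_gem_begin_shadow_fb_access,
 *		.end_fb_access = drm_gem_end_shadow_fb_access,
 *	};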
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
	struct drm_framebuffer *fb = plane_state->fb;

	if (!fb)
		return 0;

	return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
}
EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access);

/**
 * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_helper_funcs.end_fb_access. It
 * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order.
 *
 * See drm_gem_begin_shadow_fb_access() for more information.
 */
void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
	struct drm_framebuffer *fb = plane_state->fb;

	if (!fb)
		return;

	drm_gem_fb_vunmap(fb, shadow_plane_state->map);
}
EXPORT_SYMBOL(drm_gem_end_shadow_fb_access);

/**
 * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.begin_fb_access.
 *
 * See drm_gem_begin_shadow_fb_access() for details and
 * drm_gem_simple_kms_end_shadow_fb_access() for cleanup.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
					      struct drm_plane_state *plane_state)
{
	return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access);

/**
 * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.end_fb_access.
 * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in
 * reverse order.
 *
 * See drm_gem_simple_kms_begin_shadow_fb_access().
 */
void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
					     struct drm_plane_state *plane_state)
{
	drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access);

/**
 * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
 * @pipe: the simple display pipe
 *
 * This function implements struct drm_simple_display_pipe_funcs.reset_plane
 * for shadow-buffered planes.
 */
void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe)
{
	drm_gem_reset_shadow_plane(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane);

/**
 * drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @pipe: the simple display pipe
 *
 * This function implements struct drm_simple_display_pipe_funcs.duplicate_plane_state
 * for shadow-buffered planes. It does not duplicate existing mappings of the shadow
 * buffers. Mappings are maintained during the atomic commit by the plane's
 * begin_fb_access and end_fb_access helpers.
 *
 * Returns:
 * A pointer to a new plane state on success, or NULL otherwise.
 */
struct drm_plane_state *
drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe)
{
	return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state);

/**
 * drm_gem_simple_kms_destroy_shadow_plane_state - destroys shadow-buffered plane state
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.destroy_plane_state
 * for shadow-buffered planes. It expects that mappings of shadow buffers
 * have been released already.
 */
void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
						   struct drm_plane_state *plane_state)
{
	drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state);