// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/dma-resv.h>
#include <linux/dma-fence-chain.h>
#include <linux/export.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "drm_internal.h"

/**
 * DOC: overview
 *
 * The GEM atomic helpers library implements generic atomic-commit
 * functions for drivers that use GEM objects. Currently, it provides
 * synchronization helpers, and plane state and framebuffer BO mappings
 * for planes with shadow buffers.
 *
 * Before scanout, a plane's framebuffer needs to be synchronized with
 * possible writers that draw into the framebuffer. All drivers should
 * call drm_gem_plane_helper_prepare_fb() from their implementation of
 * struct &drm_plane_helper_funcs.prepare_fb. It sets the plane's fence from
 * the framebuffer so that the DRM core can synchronize access automatically.
 * drm_gem_plane_helper_prepare_fb() can also be used directly as the
 * implementation of prepare_fb.
 *
 * .. code-block:: c
 *
 *	#include <drm/drm_gem_atomic_helper.h>
 *
 *	struct drm_plane_helper_funcs driver_plane_helper_funcs = {
 *		...,
 *		.prepare_fb = drm_gem_plane_helper_prepare_fb,
 *	};
 *
 * A driver using a shadow buffer copies the content of the shadow buffers
 * into the HW's framebuffer memory during an atomic update. This requires
 * a mapping of the shadow buffer into kernel address space. The mappings
 * cannot be established by commit-tail functions, such as atomic_update,
 * as this would violate locking rules around dma_buf_vmap().
 *
 * The helpers for shadow-buffered planes establish and release mappings,
 * and provide struct drm_shadow_plane_state, which stores the plane's mapping
 * for commit-tail functions.
 *
 * Shadow-buffered planes can easily be enabled by using the provided macros
 * %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS.
 * These macros set up the plane and plane-helper callbacks to point to the
 * shadow-buffer helpers.
 *
 * .. code-block:: c
 *
 *	#include <drm/drm_gem_atomic_helper.h>
 *
 *	struct drm_plane_funcs driver_plane_funcs = {
 *		...,
 *		DRM_GEM_SHADOW_PLANE_FUNCS,
 *	};
 *
 *	struct drm_plane_helper_funcs driver_plane_helper_funcs = {
 *		...,
 *		DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
 *	};
 *
 * In the driver's atomic-update function, shadow-buffer mappings are available
 * from the plane state. Use to_drm_shadow_plane_state() to upcast from
 * struct drm_plane_state.
 *
 * .. code-block:: c
 *
 *	void driver_plane_atomic_update(struct drm_plane *plane,
 *					struct drm_plane_state *old_plane_state)
 *	{
 *		struct drm_plane_state *plane_state = plane->state;
 *		struct drm_shadow_plane_state *shadow_plane_state =
 *			to_drm_shadow_plane_state(plane_state);
 *
 *		// access shadow buffer via shadow_plane_state->map
 *	}
 *
 * A mapping address for each of the framebuffer's buffer objects is stored in
 * struct &drm_shadow_plane_state.map. The mappings are valid while the state
 * is being used.
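 *
 * For multi-planar formats, each color plane of the framebuffer has its own
 * entry in the map array. A minimal sketch of reading the entries, assuming
 * a hypothetical NV12 framebuffer whose BOs are mapped in system memory:
 *
 * .. code-block:: c
 *
 *	// illustrative only; I/O-memory mappings use vaddr_iomem instead
 *	void *y_plane = shadow_plane_state->map[0].vaddr;
 *	void *cbcr_plane = shadow_plane_state->map[1].vaddr;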
 *
 * Drivers that use struct drm_simple_display_pipe can use
 * %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the
 * respective callbacks. Access to shadow-buffer mappings is the same as in
 * the regular atomic_update example above.
 *
 * .. code-block:: c
 *
 *	struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
 *		...,
 *		DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
 *	};
 *
 *	void driver_pipe_enable(struct drm_simple_display_pipe *pipe,
 *				struct drm_crtc_state *crtc_state,
 *				struct drm_plane_state *plane_state)
 *	{
 *		struct drm_shadow_plane_state *shadow_plane_state =
 *			to_drm_shadow_plane_state(plane_state);
 *
 *		// access shadow buffer via shadow_plane_state->map
 *	}
 */

/*
 * Plane Helpers
 */

/**
 * drm_gem_plane_helper_prepare_fb() - Prepare a GEM backed framebuffer
 * @plane: Plane
 * @state: Plane state the fence will be attached to
 *
 * This function extracts the exclusive fence from &drm_gem_object.resv and
 * attaches it to plane state for the atomic helper to wait on. This is
 * necessary to correctly implement implicit synchronization for any buffers
 * shared as a struct &dma_buf. This function can be used as the
 * &drm_plane_helper_funcs.prepare_fb callback.
 *
 * There is no need for a &drm_plane_helper_funcs.cleanup_fb hook for simple
 * GEM based framebuffer drivers which have their buffers always pinned in
 * memory.
 *
 * This function is the default implementation of
 * &drm_plane_helper_funcs.prepare_fb for GEM drivers that do not provide
 * the callback.
 */
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct dma_fence *fence = dma_fence_get(state->fence);
	enum dma_resv_usage usage;
	size_t i;
	int ret;

	if (!state->fb)
		return 0;

	/*
	 * Only add the kernel fences here if there is already a fence set via
	 * explicit fencing interfaces on the atomic ioctl.
	 *
	 * This way explicit fencing can be used to overrule implicit fencing,
	 * which is important to make explicit fencing use-cases work: One
	 * example is using one buffer for 2 screens with different refresh
	 * rates. Implicit fencing will clamp rendering to the refresh rate of
	 * the slower screen, whereas explicit fencing allows 2 independent
	 * render and display loops on a single buffer. If a driver obeys both
	 * implicit and explicit fences for plane updates, then it will break
	 * all the benefits of explicit fencing.
	 */
	usage = fence ? DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_WRITE;
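
	/*
	 * Merge the fences of all BOs in the framebuffer into a single fence
	 * for the plane state. Multi-planar framebuffers reference one BO per
	 * color plane; chaining the fences makes the atomic helper wait for
	 * all of them.
	 */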
	for (i = 0; i < state->fb->format->num_planes; ++i) {
		struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i);
		struct dma_fence *new;

		if (!obj) {
			ret = -EINVAL;
			goto error;
		}

		ret = dma_resv_get_singleton(obj->resv, usage, &new);
		if (ret)
			goto error;

		if (new && fence) {
			struct dma_fence_chain *chain = dma_fence_chain_alloc();

			if (!chain) {
				ret = -ENOMEM;
				goto error;
			}

			dma_fence_chain_init(chain, fence, new, 1);
			fence = &chain->base;
		} else if (new) {
			fence = new;
		}
	}

	dma_fence_put(state->fence);
	state->fence = fence;

	return 0;

error:
	dma_fence_put(fence);
	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);

/*
 * Shadow-buffered Planes
 */

/**
 * __drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @plane: the plane
 * @new_shadow_plane_state: the new shadow-buffered plane state
 *
 * This function duplicates shadow-buffered plane state. This is helpful for
 * drivers that subclass struct drm_shadow_plane_state.
 *
 * The function does not duplicate existing mappings of the shadow buffers.
 * Mappings are maintained during the atomic commit by the plane's
 * begin_fb_access and end_fb_access helpers. See
 * drm_gem_begin_shadow_fb_access() and drm_gem_end_shadow_fb_access() for
 * the corresponding helpers.
 */
void
__drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
				       struct drm_shadow_plane_state *new_shadow_plane_state)
{
	struct drm_plane_state *plane_state = plane->state;
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(plane_state);

	__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);

	drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
				   &shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);

/**
 * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @plane: the plane
 *
 * This function implements struct &drm_plane_funcs.atomic_duplicate_state for
 * shadow-buffered planes. It assumes the existing state to be of type
 * struct drm_shadow_plane_state and it allocates the new state to be of this
 * type.
 *
 * The function does not duplicate existing mappings of the shadow buffers.
 * Mappings are maintained during the atomic commit by the plane's
 * begin_fb_access and end_fb_access helpers. See
 * drm_gem_begin_shadow_fb_access() and drm_gem_end_shadow_fb_access() for
 * the corresponding helpers.
 *
 * Returns:
 * A pointer to a new plane state on success, or NULL otherwise.
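 *
 * Drivers that subclass struct drm_shadow_plane_state provide their own
 * duplicate function around __drm_gem_duplicate_shadow_plane_state()
 * instead. A minimal sketch, with struct driver_plane_state being a
 * hypothetical subclass:
 *
 * .. code-block:: c
 *
 *	struct driver_plane_state {
 *		struct drm_shadow_plane_state base;
 *		// driver-private state
 *	};
 *
 *	struct drm_plane_state *
 *	driver_plane_duplicate_state(struct drm_plane *plane)
 *	{
 *		struct driver_plane_state *new_driver_state;
 *
 *		if (!plane->state)
 *			return NULL;
 *
 *		new_driver_state = kzalloc(sizeof(*new_driver_state), GFP_KERNEL);
 *		if (!new_driver_state)
 *			return NULL;
 *		__drm_gem_duplicate_shadow_plane_state(plane, &new_driver_state->base);
 *
 *		// duplicate driver-private state here
 *
 *		return &new_driver_state->base.base;
 *	}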
 */
struct drm_plane_state *
drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane)
{
	struct drm_plane_state *plane_state = plane->state;
	struct drm_shadow_plane_state *new_shadow_plane_state;

	if (!plane_state)
		return NULL;

	new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL);
	if (!new_shadow_plane_state)
		return NULL;
	__drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);

	return &new_shadow_plane_state->base;
}
EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);

/**
 * __drm_gem_destroy_shadow_plane_state - cleans up shadow-buffered plane state
 * @shadow_plane_state: the shadow-buffered plane state
 *
 * This function cleans up shadow-buffered plane state. This is helpful for
 * drivers that subclass struct drm_shadow_plane_state.
 */
void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state)
{
	drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state);
	__drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
}
EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state);

/**
 * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_funcs.atomic_destroy_state
 * for shadow-buffered planes. It expects that mappings of shadow buffers
 * have been released already.
 */
void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
					struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(plane_state);

	__drm_gem_destroy_shadow_plane_state(shadow_plane_state);
	kfree(shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);

/**
 * __drm_gem_reset_shadow_plane - resets a shadow-buffered plane
 * @plane: the plane
 * @shadow_plane_state: the shadow-buffered plane state
 *
 * This function resets state for shadow-buffered planes. This is helpful
 * for drivers that subclass struct drm_shadow_plane_state.
 */
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
				  struct drm_shadow_plane_state *shadow_plane_state)
{
	__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
	drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);

/**
 * drm_gem_reset_shadow_plane - resets a shadow-buffered plane
 * @plane: the plane
 *
 * This function implements struct &drm_plane_funcs.reset for shadow-buffered
 * planes. It assumes the current plane state to be of type
 * struct drm_shadow_plane_state and it allocates the new state of this type.
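 *
 * Drivers that subclass struct drm_shadow_plane_state implement their own
 * reset along the same lines, but call __drm_gem_reset_shadow_plane() on the
 * embedded state. A minimal sketch, with struct driver_plane_state being a
 * hypothetical subclass of struct drm_shadow_plane_state and
 * driver_plane_destroy_state() its destroy function:
 *
 * .. code-block:: c
 *
 *	void driver_plane_reset(struct drm_plane *plane)
 *	{
 *		struct driver_plane_state *driver_state;
 *
 *		if (plane->state) {
 *			driver_plane_destroy_state(plane, plane->state);
 *			plane->state = NULL;
 *		}
 *
 *		driver_state = kzalloc(sizeof(*driver_state), GFP_KERNEL);
 *		if (!driver_state)
 *			return;
 *		__drm_gem_reset_shadow_plane(plane, &driver_state->base);
 *	}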
 */
void drm_gem_reset_shadow_plane(struct drm_plane *plane)
{
	struct drm_shadow_plane_state *shadow_plane_state;

	if (plane->state) {
		drm_gem_destroy_shadow_plane_state(plane, plane->state);
		plane->state = NULL; /* must be set to NULL here */
	}

	shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
	if (!shadow_plane_state)
		return;
	__drm_gem_reset_shadow_plane(plane, shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_reset_shadow_plane);

/**
 * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It
 * maps all buffer objects of the plane's framebuffer into kernel address
 * space and stores them in struct &drm_shadow_plane_state.map. The first data
 * bytes are available in struct &drm_shadow_plane_state.data.
 *
 * See drm_gem_end_shadow_fb_access() for cleanup.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
	struct drm_framebuffer *fb = plane_state->fb;

	if (!fb)
		return 0;

	return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
}
EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access);

/**
 * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_helper_funcs.end_fb_access. It
 * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order.
 *
 * See drm_gem_begin_shadow_fb_access() for more information.
 */
void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
	struct drm_framebuffer *fb = plane_state->fb;

	if (!fb)
		return;

	drm_gem_fb_vunmap(fb, shadow_plane_state->map);
}
EXPORT_SYMBOL(drm_gem_end_shadow_fb_access);
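
/*
 * Helpers for struct drm_simple_display_pipe
 */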

/**
 * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.begin_fb_access.
 *
 * See drm_gem_begin_shadow_fb_access() for details and
 * drm_gem_simple_kms_end_shadow_fb_access() for cleanup.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
					      struct drm_plane_state *plane_state)
{
	return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access);

/**
 * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.end_fb_access.
 * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in
 * reverse order.
 *
 * See drm_gem_simple_kms_begin_shadow_fb_access().
 */
void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
					     struct drm_plane_state *plane_state)
{
	drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access);

/**
 * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
 * @pipe: the simple display pipe
 *
 * This function implements struct drm_simple_display_pipe_funcs.reset_plane
 * for shadow-buffered planes.
 */
void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe)
{
	drm_gem_reset_shadow_plane(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane);

/**
 * drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
 * @pipe: the simple display pipe
 *
 * This function implements struct drm_simple_display_pipe_funcs.duplicate_plane_state
 * for shadow-buffered planes. It does not duplicate existing mappings of the
 * shadow buffers. Mappings are maintained during the atomic commit by the
 * plane's begin_fb_access and end_fb_access helpers.
 *
 * Returns:
 * A pointer to a new plane state on success, or NULL otherwise.
 */
struct drm_plane_state *
drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe)
{
	return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state);

/**
 * drm_gem_simple_kms_destroy_shadow_plane_state - deletes shadow-buffered plane state
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_pipe_funcs.destroy_plane_state
 * for shadow-buffered planes. It expects that mappings of shadow buffers
 * have been released already.
 */
void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
						   struct drm_plane_state *plane_state)
{
	drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state);