/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine-grained locking, and the atomic ioctl where
 * userspace can indirectly control locking order, it becomes necessary
 * to use ww_mutex and acquire-contexts to avoid deadlocks. But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx. This is provided
 * by drm_modeset_lock() / struct drm_modeset_acquire_ctx.
 *
 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
 *
 * The basic usage pattern is to:
 *
 *     drm_modeset_acquire_init(&ctx, 0)
 *     retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, &ctx)
 *         if (ret == -EDEADLK) {
 *             drm_modeset_backoff(&ctx);
 *             goto retry;
 *         }
 *     }
 *
 *     ... do stuff ...
 *
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */
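
/*
 * For illustration, the pattern above as a compilable sketch. This helper is
 * not part of the DRM core API: the function name and the caller-supplied
 * array of locks are assumptions made purely for the example.
 */
static int __maybe_unused example_lock_set(struct drm_modeset_lock **locks,
					   int count)
{
	struct drm_modeset_acquire_ctx ctx;
	int i, ret = 0;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	for (i = 0; i < count; i++) {
		ret = drm_modeset_lock(locks[i], &ctx);
		if (ret == -EDEADLK) {
			/* drop everything, block on the contended lock, retry */
			drm_modeset_backoff(&ctx);
			goto retry;
		}
		if (ret)
			goto out;
	}

	/* ... do stuff while the whole set is locked ... */

out:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}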

/**
 * __drm_modeset_lock_all - internal helper to grab all modeset locks
 * @dev: DRM device
 * @trylock: trylock mode for atomic contexts
 *
 * This is a special version of drm_modeset_lock_all() which can also be used
 * in atomic contexts, in which case @trylock must be set to true.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int __drm_modeset_lock_all(struct drm_device *dev,
			   bool trylock)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx),
		      trylock ? GFP_ATOMIC : GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	if (trylock) {
		if (!mutex_trylock(&config->mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&config->mutex);
	}

	drm_modeset_acquire_init(ctx, 0);
	ctx->trylock_only = trylock;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;
	ret = drm_modeset_lock_all_crtcs(dev, ctx);
	if (ret)
		goto fail;

	WARN_ON(config->acquire_ctx);

	/* now we hold the locks, so now that it is safe, stash the
	 * ctx for drm_modeset_unlock_all():
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);

	return 0;

fail:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

out:
	kfree(ctx);
	return ret;
}
EXPORT_SYMBOL(__drm_modeset_lock_all);

/**
 * drm_modeset_lock_all - take all modeset locks
 * @dev: drm device
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented. Locks must be dropped with
 * drm_modeset_unlock_all().
 */
void drm_modeset_lock_all(struct drm_device *dev)
{
	WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
}
EXPORT_SYMBOL(drm_modeset_lock_all);

/**
 * drm_modeset_unlock_all - drop all modeset locks
 * @dev: device
 *
 * This function drops all modeset locks taken by drm_modeset_lock_all().
 */
void drm_modeset_unlock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	config->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);

	mutex_unlock(&config->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
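
/*
 * For illustration, a legacy code path with no fine-grained locking would
 * simply bracket its critical section with the two helpers above.
 * example_legacy_op() is a hypothetical driver function, not part of the
 * DRM core.
 */
static void __maybe_unused example_legacy_op(struct drm_device *dev)
{
	drm_modeset_lock_all(dev);

	/* ... touch modeset state that requires every lock to be held ... */

	drm_modeset_unlock_all(dev);
}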

/**
 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
 * @crtc: DRM CRTC
 * @plane: DRM plane to be updated on @crtc
 *
 * This function locks the given crtc and plane (which should be either the
 * primary or cursor plane) using a hidden acquire context. This is necessary
 * so that drivers internally using the atomic interfaces can grab further
 * locks with the lock acquire context.
 *
 * Note that @plane can be NULL, e.g. when cursor support hasn't been
 * converted to universal planes yet.
 */
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
			   struct drm_plane *plane)
{
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (WARN_ON(!ctx))
		return;

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail;

	if (plane) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			goto fail;

		if (plane->crtc) {
			ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
			if (ret)
				goto fail;
		}
	}

	WARN_ON(crtc->acquire_ctx);

	/* now we hold the locks, so now that it is safe, stash the
	 * ctx for drm_modeset_unlock_crtc():
	 */
	crtc->acquire_ctx = ctx;

	return;

fail:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}
}
EXPORT_SYMBOL(drm_modeset_lock_crtc);

/**
 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
 * @crtc: drm crtc
 *
 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
 * locking, and store the acquire ctx in the corresponding crtc. All other
 * legacy operations take all locks and use a global acquire context. This
 * function grabs the right one.
 */
struct drm_modeset_acquire_ctx *
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
{
	if (crtc->acquire_ctx)
		return crtc->acquire_ctx;

	WARN_ON(!crtc->dev->mode_config.acquire_ctx);

	return crtc->dev->mode_config.acquire_ctx;
}
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);

/**
 * drm_modeset_unlock_crtc - drop crtc lock
 * @crtc: drm crtc
 *
 * This drops the crtc lock acquired with drm_modeset_lock_crtc() and all
 * other locks acquired through the hidden context.
 */
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	crtc->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
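
/*
 * For illustration, a legacy per-crtc path (e.g. a page flip) combines the
 * three helpers above as sketched here. example_flip() is a hypothetical
 * driver entry point, not part of the DRM core; using crtc->cursor as the
 * plane is likewise just an example.
 */
static void __maybe_unused example_flip(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx *ctx;

	drm_modeset_lock_crtc(crtc, crtc->cursor);

	/* further locks can be taken against the hidden context */
	ctx = drm_modeset_legacy_acquire_ctx(crtc);
	(void)ctx;

	/* ... update the plane/crtc state ... */

	drm_modeset_unlock_crtc(crtc);
}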

/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);

/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: for future use
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
			      uint32_t flags)
{
	memset(ctx, 0, sizeof(*ctx));
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);
}
EXPORT_SYMBOL(drm_modeset_acquire_init);

/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);

/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	WARN_ON(ctx->contended);
	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock *lock;

		lock = list_first_entry(&ctx->locked,
					struct drm_modeset_lock, head);

		drm_modeset_unlock(lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);

static inline int modeset_lock(struct drm_modeset_lock *lock,
			       struct drm_modeset_acquire_ctx *ctx,
			       bool interruptible, bool slow)
{
	int ret;

	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
	}

	return ret;
}

static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
			   bool interruptible)
{
	struct drm_modeset_lock *contended = ctx->contended;

	ctx->contended = NULL;

	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, interruptible, true);
}

/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 */
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	modeset_backoff(ctx, false);
}
EXPORT_SYMBOL(drm_modeset_backoff);

/**
 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * Interruptible version of drm_modeset_backoff().
 */
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
{
	return modeset_backoff(ctx, true);
}
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
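
/*
 * For illustration, the interruptible flavour of the retry dance. The helper
 * name is hypothetical; the functions it calls are part of this file
 * (drm_modeset_lock_interruptible() is defined below).
 */
static int __maybe_unused example_lock_interruptible(struct drm_modeset_lock *lock,
						     struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

retry:
	ret = drm_modeset_lock_interruptible(lock, ctx);
	if (ret == -EDEADLK) {
		/* a signal while blocking here aborts with -EINTR instead */
		ret = drm_modeset_backoff_interruptible(ctx);
		if (!ret)
			goto retry;
	}
	return ret;
}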

/**
 * drm_modeset_lock - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * If @ctx is not NULL, then its ww acquire context is used and the
 * lock will be tracked by the context and can be released by calling
 * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
 * deadlock scenario has been detected and it is an error to attempt
 * to take any more locks without first calling drm_modeset_backoff().
 */
int drm_modeset_lock(struct drm_modeset_lock *lock,
		     struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, false, false);

	ww_mutex_lock(&lock->mutex, NULL);
	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);

/**
 * drm_modeset_lock_interruptible - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * Interruptible version of drm_modeset_lock().
 */
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
				   struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, true, false);

	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_interruptible);

/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);

/* In some legacy codepaths it's convenient to just grab all the crtc and plane
 * related locks. */
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret = 0;

	list_for_each_entry(crtc, &config->crtc_list, head) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	list_for_each_entry(plane, &config->plane_list, head) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
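
/*
 * For illustration, grabbing every crtc and plane lock under a private
 * acquire context, with deadlock backoff. example_lock_everything() is a
 * hypothetical helper, not part of the DRM core.
 */
static int __maybe_unused example_lock_everything(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_crtcs(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* ... on success, every crtc and plane lock is held here ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}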