/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */


#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
	kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);
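/*
 * Illustrative sketch (not part of this file): a driver that still
 * subclasses &drm_atomic_state would typically pair the default helpers
 * above in its &drm_mode_config_funcs hooks. All "foo_*" names below are
 * hypothetical.
 *
 *	struct foo_atomic_state {
 *		struct drm_atomic_state base;
 *		int shared_clock_rate;
 *	};
 *
 *	static struct drm_atomic_state *foo_atomic_state_alloc(struct drm_device *dev)
 *	{
 *		struct foo_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
 *
 *		if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
 *			kfree(state);
 *			return NULL;
 *		}
 *		return &state->base;
 *	}
 *
 *	static void foo_atomic_state_free(struct drm_atomic_state *base)
 *	{
 *		struct foo_atomic_state *state =
 *			container_of(base, struct foo_atomic_state, base);
 *
 *		drm_atomic_state_default_release(base);
 *		kfree(state);
 *	}
 */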
/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;

	if (!config->funcs->atomic_state_alloc) {
		struct drm_atomic_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		state->connectors[i].old_state = NULL;
		state->connectors[i].new_state = NULL;
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
		state->crtcs[i].old_state = NULL;
		state->crtcs[i].new_state = NULL;

		if (state->crtcs[i].commit) {
			drm_crtc_commit_put(state->crtcs[i].commit);
			state->crtcs[i].commit = NULL;
		}
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
		state->planes[i].old_state = NULL;
		state->planes[i].new_state = NULL;
	}

	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
		state->private_objs[i].old_state = NULL;
		state->private_objs[i].new_state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);
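/*
 * Illustrative sketch (not part of this file): clearing the state is the
 * core of the usual EDEADLK backoff dance when building an atomic update
 * (assuming an already allocated @state and acquire context @ctx):
 *
 *	retry:
 *		ret = drm_atomic_check_only(state);
 *		if (ret == -EDEADLK) {
 *			drm_atomic_state_clear(state);
 *			drm_modeset_backoff(&ctx);
 *			goto retry;
 *		}
 */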
/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].old_state = crtc->state;
	state->crtcs[index].new_state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
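/*
 * Illustrative sketch (not part of this file): callers are expected to deal
 * with the ERR_PTR() return convention documented above and simply pass
 * -EDEADLK back up so the backoff loop sketched earlier can restart the
 * update ("foo_check_shared_clock" is a hypothetical driver check):
 *
 *	static int foo_check_shared_clock(struct drm_atomic_state *state,
 *					  struct drm_crtc *crtc)
 *	{
 *		struct drm_crtc_state *crtc_state;
 *
 *		crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *		if (IS_ERR(crtc_state))
 *			return PTR_ERR(crtc_state);
 *
 *		if (!crtc_state->enable)
 *			return 0;
 *
 *		return 0;
 *	}
 */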
static void set_out_fence_for_crtc(struct drm_atomic_state *state,
				   struct drm_crtc *crtc, s32 __user *fence_ptr)
{
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
					  struct drm_crtc *crtc)
{
	s32 __user *fence_ptr;

	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;

	return fence_ptr;
}

static int set_out_fence_for_connector(struct drm_atomic_state *state,
				       struct drm_connector *connector,
				       s32 __user *fence_ptr)
{
	unsigned int index = drm_connector_index(connector);

	if (!fence_ptr)
		return 0;

	if (put_user(-1, fence_ptr))
		return -EFAULT;

	state->connectors[index].out_fence_ptr = fence_ptr;

	return 0;
}

static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
					       struct drm_connector *connector)
{
	unsigned int index = drm_connector_index(connector);
	s32 __user *fence_ptr;

	fence_ptr = state->connectors[index].out_fence_ptr;
	state->connectors[index].out_fence_ptr = NULL;

	return fence_ptr;
}

/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state and update
 * the enable property.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 const struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_mode_modeinfo umode;
	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
						 sizeof(umode),
						 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);

		drm_mode_copy(&state->mode, mode);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
				 mode->name, crtc->base.id, crtc->name, state);
	} else {
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
				 crtc->base.id, crtc->name, state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);

/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	struct drm_crtc *crtc = state->crtc;

	if (blob == state->mode_blob)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		int ret;

		if (blob->length != sizeof(struct drm_mode_modeinfo)) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
					 crtc->base.id, crtc->name,
					 blob->length);
			return -EINVAL;
		}

		ret = drm_mode_convert_umode(crtc->dev,
					     &state->mode, blob->data);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
					 crtc->base.id, crtc->name,
					 ret, drm_get_mode_status_name(state->mode.status));
			drm_mode_debug_printmodeline(&state->mode);
			return -EINVAL;
		}

		state->mode_blob = drm_property_blob_get(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
				 state->mode.name, crtc->base.id, crtc->name,
				 state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
				 crtc->base.id, crtc->name, state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
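/*
 * Illustrative sketch (not part of this file): enabling a CRTC with a
 * kernel-internal mode from driver code typically combines the helpers above
 * (the surrounding function and the @mode variable are assumed to exist):
 *
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);
 *
 *	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
 *	if (ret)
 *		return ret;
 *
 *	crtc_state->active = true;
 */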
/**
 * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it
 * @dev: DRM device
 * @blob: a pointer to the member blob to be replaced
 * @blob_id: ID of the new blob
 * @expected_size: total expected size of the blob data (in bytes)
 * @expected_elem_size: expected element size of the blob data (in bytes)
 * @replaced: did the blob get replaced?
 *
 * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero
 * @blob becomes NULL.
 *
 * If @expected_size is positive the new blob length is expected to be equal
 * to @expected_size bytes. If @expected_elem_size is positive the new blob
 * length is expected to be a multiple of @expected_elem_size bytes. Otherwise
 * an error is returned.
 *
 * @replaced will indicate to the caller whether the blob was replaced or not.
 * If the old and new blobs were in fact the same blob @replaced will be false
 * otherwise it will be true.
 *
 * RETURNS:
 * Zero on success, error code on failure.
 */
static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
					 struct drm_property_blob **blob,
					 uint64_t blob_id,
					 ssize_t expected_size,
					 ssize_t expected_elem_size,
					 bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 &&
		    new_blob->length != expected_size) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
		if (expected_elem_size > 0 &&
		    new_blob->length % expected_elem_size != 0) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
	}

	*replaced |= drm_property_replace_blob(blob, new_blob);
	drm_property_blob_put(new_blob);

	return 0;
}

/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_blob_put(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->degamma_lut,
					val,
					-1, sizeof(struct drm_color_lut),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm), -1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->gamma_lut,
					val,
					-1, sizeof(struct drm_color_lut),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property) {
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	} else {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
				 crtc->base.id, crtc->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);

/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		*val = 0;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}

/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since that likely indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_connector_check - check connector state
 * @connector: connector to check
 * @state: connector state to check
 *
 * Provides core sanity checks for connector state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_connector_check(struct drm_connector *connector,
		struct drm_connector_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_writeback_job *writeback_job = state->writeback_job;

	if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
		return 0;

	if (writeback_job->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	if (state->crtc)
		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);

	if (writeback_job->fb && !crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
				 connector->base.id, connector->name,
				 state->crtc->base.id);
		return -EINVAL;
	}

	if (writeback_job->out_fence && !writeback_job->fb) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	/* the legacy pointers should never be set */
	WARN_ON(plane->fb);
	WARN_ON(plane->old_fb);
	WARN_ON(plane->crtc);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);

/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		if (state->fence)
			return -EINVAL;

		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->alpha_property) {
		state->alpha = val;
	} else if (property == plane->rotation_property) {
		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
					 plane->base.id, plane->name, val);
			return -EINVAL;
		}
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (property == plane->color_encoding_property) {
		state->color_encoding = val;
	} else if (property == plane->color_range_property) {
		state->color_range = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
				 plane->base.id, plane->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_plane_get_property - get property value from plane state
 * @plane: the drm plane to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == plane->alpha_property) {
		*val = state->alpha;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (property == plane->color_encoding_property) {
		*val = state->color_encoding;
	} else if (property == plane->color_range_property) {
		*val = state->color_range;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (state->crtc && !state->fb) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	} else if (state->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
				 state->crtc->base.id, state->crtc->name,
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
					   state->fb->modifier);
	if (ret) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
				 plane->base.id, plane->name,
				 drm_get_format_name(state->fb->format->format,
						     &format_name),
				 state->fb->modifier);
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
				 plane->base.id, plane->name,
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
				 plane->base.id, plane->name,
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
				 state->fb->width, state->fb->height);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}

/**
 * DOC: handling driver private state
 *
 * Very often the DRM objects exposed to userspace in the atomic modeset api
 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
 * underlying hardware. Especially for any kind of shared resources (e.g. shared
 * clocks, scaler units, bandwidth and fifo limits shared among a group of
 * planes or CRTCs, and so on) it makes sense to model these as independent
 * objects. Drivers then need to do similar state tracking and commit ordering for
 * such private (since not exposed to userspace) objects as the atomic core and
 * helpers already provide for connectors, planes and CRTCs.
 *
 * To make this easier on drivers the atomic core provides some support to track
 * driver private state objects using struct &drm_private_obj, with the
 * associated state struct &drm_private_state.
 *
 * Similar to userspace-exposed objects, private state structures can be
 * acquired by calling drm_atomic_get_private_obj_state(). Since this function
 * does not take care of locking, drivers should wrap it for each type of
 * private state object they have with the required call to drm_modeset_lock()
 * for the corresponding &drm_modeset_lock.
 *
 * All private state structures contained in a &drm_atomic_state update can be
 * iterated using for_each_oldnew_private_obj_in_state(),
 * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
 * Drivers are recommended to wrap these for each type of driver private state
 * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
 * least if they want to iterate over all objects of a given type.
 *
 * An earlier way to handle driver private state was by subclassing struct
 * &drm_atomic_state. But since that encourages non-standard ways to implement
 * the check/commit split atomic requires (by using e.g. "check and rollback or
 * commit" instead of "duplicate state, check, then either commit or release
 * duplicated state") it is deprecated in favour of using &drm_private_state.
 */
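/*
 * Illustrative sketch (not part of this file): a driver-side wrapper of the
 * kind recommended above, taking the driver's own &drm_modeset_lock before
 * calling drm_atomic_get_private_obj_state(). All "foo_*" names are
 * hypothetical.
 *
 *	struct foo_mux_state {
 *		struct drm_private_state base;
 *		u32 routing;
 *	};
 *
 *	static struct foo_mux_state *
 *	foo_get_mux_state(struct drm_atomic_state *state, struct foo_mux *mux)
 *	{
 *		struct drm_private_state *priv_state;
 *		int ret;
 *
 *		ret = drm_modeset_lock(&mux->lock, state->acquire_ctx);
 *		if (ret)
 *			return ERR_PTR(ret);
 *
 *		priv_state = drm_atomic_get_private_obj_state(state, &mux->obj);
 *		if (IS_ERR(priv_state))
 *			return ERR_CAST(priv_state);
 *
 *		return container_of(priv_state, struct foo_mux_state, base);
 *	}
 */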
/**
 * drm_atomic_private_obj_init - initialize private object
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
 */
void
drm_atomic_private_obj_init(struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	obj->state = state;
	obj->funcs = funcs;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It does not grab any locks as the caller is
 * expected to take care of any required locking.
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
 */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);

/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in the legacy setprop path for us. Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, the driver
		 * silently rejects it and returns 0. This prevents userspace
		 * from accidentally breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (property == config->aspect_ratio_property) {
		state->picture_aspect_ratio = val;
	} else if (property == config->content_type_property) {
		state->content_type = val;
	} else if (property == connector->scaling_mode_property) {
		state->scaling_mode = val;
	} else if (property == connector->content_protection_property) {
		if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
			return -EINVAL;
		}
		state->content_protection = val;
	} else if (property == config->writeback_fb_id_property) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
		return ret;
	} else if (property == config->writeback_out_fence_ptr_property) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		return set_out_fence_for_connector(state->state, connector,
						   fence_ptr);
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
				 connector->base.id, connector->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		if (state->writeback_job && state->writeback_job->fb)
			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		*val = connector->dpms;
	} else if (property == config->tv_select_subconnector_property) {
		*val = state->tv.subconnector;
	} else if (property == config->tv_left_margin_property) {
		*val = state->tv.margins.left;
	} else if (property == config->tv_right_margin_property) {
		*val = state->tv.margins.right;
	} else if (property == config->tv_top_margin_property) {
		*val = state->tv.margins.top;
	} else if (property == config->tv_bottom_margin_property) {
		*val = state->tv.margins.bottom;
	} else if (property == config->tv_mode_property) {
		*val = state->tv.mode;
	} else if (property == config->tv_brightness_property) {
		*val = state->tv.brightness;
	} else if (property == config->tv_contrast_property) {
		*val = state->tv.contrast;
	} else if (property == config->tv_flicker_reduction_property) {
		*val = state->tv.flicker_reduction;
	} else if (property == config->tv_overscan_property) {
		*val = state->tv.overscan;
	} else if (property == config->tv_saturation_property) {
		*val = state->tv.saturation;
	} else if (property == config->tv_hue_property) {
		*val = state->tv.hue;
	} else if (property == config->link_status_property) {
		*val = state->link_status;
	} else if (property == config->aspect_ratio_property) {
		*val = state->picture_aspect_ratio;
	} else if (property == config->content_type_property) {
		*val = state->content_type;
	} else if (property == connector->scaling_mode_property) {
		*val = state->scaling_mode;
	} else if (property == connector->content_protection_property) {
		*val = state->content_protection;
	} else if (property == config->writeback_fb_id_property) {
		/* Writeback framebuffer is one-shot, write and forget */
		*val = 0;
	} else if (property == config->writeback_out_fence_ptr_property) {
		*val = 0;
	} else if (connector->funcs->atomic_get_property) {
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

int drm_atomic_get_property(struct drm_mode_object *obj,
			    struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = property->dev;
	int ret;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
		ret = drm_atomic_connector_get_property(connector,
				connector->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
		ret = drm_atomic_crtc_get_property(crtc,
				crtc->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
		ret = drm_atomic_plane_get_property(plane,
				plane->state, property, val);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	/* Nothing to do for same crtc */
	if (plane_state->crtc == crtc)
		return 0;

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~drm_plane_mask(plane);
	}

	plane_state->crtc = crtc;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= drm_plane_mask(plane);
	}

	if (crtc)
		DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
				 plane->base.id, plane->name, plane_state,
				 crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
				 plane->base.id, plane->name, plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);

/**
 * drm_atomic_set_fb_for_plane - set framebuffer for plane
 * @plane_state: atomic state object for the plane
 * @fb: fb to use for the plane
 *
 * Changing the assigned framebuffer for a plane requires us to grab a reference
 * to the new fb and drop the reference to the old fb, if there is one. This
 * function takes care of all these details besides updating the pointer in the
 * state object itself.
 */
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
			    struct drm_framebuffer *fb)
{
	struct drm_plane *plane = plane_state->plane;

	if (fb)
		DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
				 fb->base.id, plane->base.id, plane->name,
				 plane_state);
	else
		DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
				 plane->base.id, plane->name, plane_state);

	drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
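/*
 * Illustrative sketch (not part of this file): the two helpers above are
 * typically used together when a driver or helper builds a plane update into
 * an atomic state ("foo_update_plane" and the surrounding setup are
 * hypothetical; compare drm_atomic_helper_update_plane()):
 *
 *	static int foo_update_plane(struct drm_atomic_state *state,
 *				    struct drm_plane *plane,
 *				    struct drm_crtc *crtc,
 *				    struct drm_framebuffer *fb)
 *	{
 *		struct drm_plane_state *plane_state;
 *		int ret;
 *
 *		plane_state = drm_atomic_get_plane_state(state, plane);
 *		if (IS_ERR(plane_state))
 *			return PTR_ERR(plane_state);
 *
 *		ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
 *		if (ret)
 *			return ret;
 *
 *		drm_atomic_set_fb_for_plane(plane_state, fb);
 *
 *		return 0;
 *	}
 */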
1662 *
1663 * This way explicit fencing can be used to overrule implicit fencing, which is
1664 * important to make explicit fencing use-cases work: One example is using one
1665 * buffer for 2 screens with different refresh rates. Implicit fencing will
1666 * clamp rendering to the refresh rate of the slower screen, whereas an explicit
1667 * fence allows 2 independent render and display loops on a single buffer. If a
1668 * driver obeys both implicit and explicit fences for plane updates, then
1669 * it will break all the benefits of explicit fencing.
1670 */
1671 void
1672 drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
1673 struct dma_fence *fence)
1674 {
1675 if (plane_state->fence) {
1676 dma_fence_put(fence);
1677 return;
1678 }
1679
1680 plane_state->fence = fence;
1681 }
1682 EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
1683
1684 /**
1685 * drm_atomic_set_crtc_for_connector - set crtc for connector
1686 * @conn_state: atomic state object for the connector
1687 * @crtc: crtc to use for the connector
1688 *
1689 * Changing the assigned crtc for a connector requires us to grab the lock and
1690 * state for the new crtc, as needed. This function takes care of all these
1691 * details besides updating the pointer in the state object itself.
1692 *
1693 * Returns:
1694 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1695 * then the w/w mutex code has detected a deadlock and the entire atomic
1696 * sequence must be restarted. All other errors are fatal.
1697 */
1698 int
1699 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
1700 struct drm_crtc *crtc)
1701 {
1702 struct drm_connector *connector = conn_state->connector;
1703 struct drm_crtc_state *crtc_state;
1704
1705 if (conn_state->crtc == crtc)
1706 return 0;
1707
1708 if (conn_state->crtc) {
1709 crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
1710 conn_state->crtc);
1711
1712 crtc_state->connector_mask &=
1713 ~drm_connector_mask(conn_state->connector);
1714
1715 drm_connector_put(conn_state->connector);
1716 conn_state->crtc = NULL;
1717 }
1718
1719 if (crtc) {
1720 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
1721 if (IS_ERR(crtc_state))
1722 return PTR_ERR(crtc_state);
1723
1724 crtc_state->connector_mask |=
1725 drm_connector_mask(conn_state->connector);
1726
1727 drm_connector_get(conn_state->connector);
1728 conn_state->crtc = crtc;
1729
1730 DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
1731 connector->base.id, connector->name,
1732 conn_state, crtc->base.id, crtc->name);
1733 } else {
1734 DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
1735 connector->base.id, connector->name,
1736 conn_state);
1737 }
1738
1739 return 0;
1740 }
1741 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
1742
1743 /*
1744 * drm_atomic_get_writeback_job - return or allocate a writeback job
1745 * @conn_state: Connector state to get the job for
1746 *
1747 * Writeback jobs have a different lifetime from the atomic state they are
1748 * associated with. This convenience function takes care of allocating a job
1749 * if there isn't yet one associated with the connector state; otherwise
1750 * it just returns the existing job.
1751 *
1752 * Returns: The writeback job for the given connector state
1753 */
1754 static struct drm_writeback_job *
1755 drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
1756 {
1757 WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1758
1759 if (!conn_state->writeback_job)
1760 conn_state->writeback_job =
1761 kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
1762
1763 return conn_state->writeback_job;
1764 }
1765
1766 /**
1767 * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
1768 * @conn_state: atomic state object for the connector
1769 * @fb: fb to use for the connector
1770 *
1771 * This is used to set the framebuffer for a writeback connector, which outputs
1772 * to a buffer instead of an actual physical connector.
1773 * Changing the assigned framebuffer requires us to grab a reference to the new
1774 * fb and drop the reference to the old fb, if there is one. This function
1775 * takes care of all these details besides updating the pointer in the
1776 * state object itself.
1777 *
1778 * Note: The only way conn_state can already have an fb set is if the commit
1779 * sets the property more than once.
1780 *
1781 * See also: drm_writeback_connector_init()
1782 *
1783 * Returns: 0 on success
1784 */
1785 int drm_atomic_set_writeback_fb_for_connector(
1786 struct drm_connector_state *conn_state,
1787 struct drm_framebuffer *fb)
1788 {
1789 struct drm_writeback_job *job =
1790 drm_atomic_get_writeback_job(conn_state);
1791 if (!job)
1792 return -ENOMEM;
1793
1794 drm_framebuffer_assign(&job->fb, fb);
1795
1796 if (fb)
1797 DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
1798 fb->base.id, conn_state);
1799 else
1800 DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
1801 conn_state);
1802
1803 return 0;
1804 }
1805 EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
1806
1807 /**
1808 * drm_atomic_add_affected_connectors - add connectors for crtc
1809 * @state: atomic state
1810 * @crtc: DRM crtc
1811 *
1812 * This function walks the current configuration and adds all connectors
1813 * currently using @crtc to the atomic configuration @state. Note that this
1814 * function must acquire the connection mutex. This can potentially cause
1815 * unneeded serialization if the update is just for the planes on one crtc. Hence
1816 * drivers and helpers should only call this when really needed (e.g. when a
1817 * full modeset needs to happen due to some change).
1818 *
1819 * Returns:
1820 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1821 * then the w/w mutex code has detected a deadlock and the entire atomic
1822 * sequence must be restarted. All other errors are fatal.
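 *
 * A minimal sketch (hypothetical driver check code, not taken from this file)
 * of pulling the rest of the CRTC's configuration into @state once a full
 * modeset has been detected:
 *
 *	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
 *		ret = drm_atomic_add_affected_connectors(state, crtc);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_atomic_add_affected_planes(state, crtc);
 *		if (ret)
 *			return ret;
 *	}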
1823 */
1824 int
1825 drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
1826 struct drm_crtc *crtc)
1827 {
1828 struct drm_mode_config *config = &state->dev->mode_config;
1829 struct drm_connector *connector;
1830 struct drm_connector_state *conn_state;
1831 struct drm_connector_list_iter conn_iter;
1832 struct drm_crtc_state *crtc_state;
1833 int ret;
1834
1835 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1836 if (IS_ERR(crtc_state))
1837 return PTR_ERR(crtc_state);
1838
1839 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
1840 if (ret)
1841 return ret;
1842
1843 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
1844 crtc->base.id, crtc->name, state);
1845
1846 /*
1847 * Changed connectors are already in @state, so we only need to look
1848 * at the connector_mask in crtc_state.
1849 */
1850 drm_connector_list_iter_begin(state->dev, &conn_iter);
1851 drm_for_each_connector_iter(connector, &conn_iter) {
1852 if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
1853 continue;
1854
1855 conn_state = drm_atomic_get_connector_state(state, connector);
1856 if (IS_ERR(conn_state)) {
1857 drm_connector_list_iter_end(&conn_iter);
1858 return PTR_ERR(conn_state);
1859 }
1860 }
1861 drm_connector_list_iter_end(&conn_iter);
1862
1863 return 0;
1864 }
1865 EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
1866
1867 /**
1868 * drm_atomic_add_affected_planes - add planes for crtc
1869 * @state: atomic state
1870 * @crtc: DRM crtc
1871 *
1872 * This function walks the current configuration and adds all planes
1873 * currently used by @crtc to the atomic configuration @state. This is useful
1874 * when an atomic commit also needs to check all currently enabled planes on
1875 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
1876 * to avoid special code to force-enable all planes.
1877 *
1878 * Since acquiring a plane state will always also acquire the w/w mutex of the
1879 * current CRTC for that plane (if there is any), adding all the plane states for
1880 * a CRTC will not reduce parallelism of atomic updates.
1881 *
1882 * Returns:
1883 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1884 * then the w/w mutex code has detected a deadlock and the entire atomic
1885 * sequence must be restarted. All other errors are fatal.
1886 */
1887 int
1888 drm_atomic_add_affected_planes(struct drm_atomic_state *state,
1889 struct drm_crtc *crtc)
1890 {
1891 struct drm_plane *plane;
1892
1893 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
1894
1895 DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
1896 crtc->base.id, crtc->name, state);
1897
1898 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
1899 struct drm_plane_state *plane_state =
1900 drm_atomic_get_plane_state(state, plane);
1901
1902 if (IS_ERR(plane_state))
1903 return PTR_ERR(plane_state);
1904 }
1905 return 0;
1906 }
1907 EXPORT_SYMBOL(drm_atomic_add_affected_planes);
1908
1909 /**
1910 * drm_atomic_check_only - check whether a given config would work
1911 * @state: atomic configuration to check
1912 *
1913 * Note that this function can return -EDEADLK if the driver needed to acquire
1914 * more locks but encountered a deadlock. The caller must then do the usual w/w
1915 * backoff dance and restart. All other errors are fatal.
1916 *
1917 * Returns:
1918 * 0 on success, negative error code on failure.
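 *
 * A minimal sketch of the backoff dance mentioned above (assuming the caller
 * owns a &drm_modeset_acquire_ctx named ctx that was stored in
 * state->acquire_ctx; not taken from this file):
 *
 *	retry:
 *		ret = drm_atomic_check_only(state);
 *		if (ret == -EDEADLK) {
 *			drm_atomic_state_clear(state);
 *			ret = drm_modeset_backoff(&ctx);
 *			if (!ret)
 *				goto retry;
 *		}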
1919 */ 1920 int drm_atomic_check_only(struct drm_atomic_state *state) 1921 { 1922 struct drm_device *dev = state->dev; 1923 struct drm_mode_config *config = &dev->mode_config; 1924 struct drm_plane *plane; 1925 struct drm_plane_state *plane_state; 1926 struct drm_crtc *crtc; 1927 struct drm_crtc_state *crtc_state; 1928 struct drm_connector *conn; 1929 struct drm_connector_state *conn_state; 1930 int i, ret = 0; 1931 1932 DRM_DEBUG_ATOMIC("checking %p\n", state); 1933 1934 for_each_new_plane_in_state(state, plane, plane_state, i) { 1935 ret = drm_atomic_plane_check(plane, plane_state); 1936 if (ret) { 1937 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", 1938 plane->base.id, plane->name); 1939 return ret; 1940 } 1941 } 1942 1943 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1944 ret = drm_atomic_crtc_check(crtc, crtc_state); 1945 if (ret) { 1946 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", 1947 crtc->base.id, crtc->name); 1948 return ret; 1949 } 1950 } 1951 1952 for_each_new_connector_in_state(state, conn, conn_state, i) { 1953 ret = drm_atomic_connector_check(conn, conn_state); 1954 if (ret) { 1955 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n", 1956 conn->base.id, conn->name); 1957 return ret; 1958 } 1959 } 1960 1961 if (config->funcs->atomic_check) { 1962 ret = config->funcs->atomic_check(state->dev, state); 1963 1964 if (ret) { 1965 DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n", 1966 state, ret); 1967 return ret; 1968 } 1969 } 1970 1971 if (!state->allow_modeset) { 1972 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1973 if (drm_atomic_crtc_needs_modeset(crtc_state)) { 1974 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", 1975 crtc->base.id, crtc->name); 1976 return -EINVAL; 1977 } 1978 } 1979 } 1980 1981 return 0; 1982 } 1983 EXPORT_SYMBOL(drm_atomic_check_only); 1984 1985 /** 1986 * drm_atomic_commit - commit configuration atomically 1987 * @state: atomic configuration to check 1988 * 1989 * Note that this function can return -EDEADLK if the driver needed to acquire 1990 * more locks but encountered a deadlock. The caller must then do the usual w/w 1991 * backoff dance and restart. All other errors are fatal. 1992 * 1993 * This function will take its own reference on @state. 1994 * Callers should always release their reference with drm_atomic_state_put(). 1995 * 1996 * Returns: 1997 * 0 on success, negative error code on failure. 1998 */ 1999 int drm_atomic_commit(struct drm_atomic_state *state) 2000 { 2001 struct drm_mode_config *config = &state->dev->mode_config; 2002 int ret; 2003 2004 ret = drm_atomic_check_only(state); 2005 if (ret) 2006 return ret; 2007 2008 DRM_DEBUG_ATOMIC("committing %p\n", state); 2009 2010 return config->funcs->atomic_commit(state->dev, state, false); 2011 } 2012 EXPORT_SYMBOL(drm_atomic_commit); 2013 2014 /** 2015 * drm_atomic_nonblocking_commit - atomic nonblocking commit 2016 * @state: atomic configuration to check 2017 * 2018 * Note that this function can return -EDEADLK if the driver needed to acquire 2019 * more locks but encountered a deadlock. The caller must then do the usual w/w 2020 * backoff dance and restart. All other errors are fatal. 2021 * 2022 * This function will take its own reference on @state. 2023 * Callers should always release their reference with drm_atomic_state_put(). 2024 * 2025 * Returns: 2026 * 0 on success, negative error code on failure. 
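 *
 * A minimal sketch of the reference handling described above (hypothetical
 * caller code, not taken from this file):
 *
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		return -ENOMEM;
 *
 *	state->acquire_ctx = &ctx;
 *	/* ... build up the new configuration in state ... */
 *	ret = drm_atomic_nonblocking_commit(state);
 *	drm_atomic_state_put(state);	/* drop the caller's reference */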
2027 */ 2028 int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) 2029 { 2030 struct drm_mode_config *config = &state->dev->mode_config; 2031 int ret; 2032 2033 ret = drm_atomic_check_only(state); 2034 if (ret) 2035 return ret; 2036 2037 DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state); 2038 2039 return config->funcs->atomic_commit(state->dev, state, true); 2040 } 2041 EXPORT_SYMBOL(drm_atomic_nonblocking_commit); 2042 2043 static void drm_atomic_print_state(const struct drm_atomic_state *state) 2044 { 2045 struct drm_printer p = drm_info_printer(state->dev->dev); 2046 struct drm_plane *plane; 2047 struct drm_plane_state *plane_state; 2048 struct drm_crtc *crtc; 2049 struct drm_crtc_state *crtc_state; 2050 struct drm_connector *connector; 2051 struct drm_connector_state *connector_state; 2052 int i; 2053 2054 DRM_DEBUG_ATOMIC("checking %p\n", state); 2055 2056 for_each_new_plane_in_state(state, plane, plane_state, i) 2057 drm_atomic_plane_print_state(&p, plane_state); 2058 2059 for_each_new_crtc_in_state(state, crtc, crtc_state, i) 2060 drm_atomic_crtc_print_state(&p, crtc_state); 2061 2062 for_each_new_connector_in_state(state, connector, connector_state, i) 2063 drm_atomic_connector_print_state(&p, connector_state); 2064 } 2065 2066 static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, 2067 bool take_locks) 2068 { 2069 struct drm_mode_config *config = &dev->mode_config; 2070 struct drm_plane *plane; 2071 struct drm_crtc *crtc; 2072 struct drm_connector *connector; 2073 struct drm_connector_list_iter conn_iter; 2074 2075 if (!drm_drv_uses_atomic_modeset(dev)) 2076 return; 2077 2078 list_for_each_entry(plane, &config->plane_list, head) { 2079 if (take_locks) 2080 drm_modeset_lock(&plane->mutex, NULL); 2081 drm_atomic_plane_print_state(p, plane->state); 2082 if (take_locks) 2083 drm_modeset_unlock(&plane->mutex); 2084 } 2085 2086 list_for_each_entry(crtc, &config->crtc_list, head) { 2087 if (take_locks) 2088 drm_modeset_lock(&crtc->mutex, NULL); 2089 drm_atomic_crtc_print_state(p, crtc->state); 2090 if (take_locks) 2091 drm_modeset_unlock(&crtc->mutex); 2092 } 2093 2094 drm_connector_list_iter_begin(dev, &conn_iter); 2095 if (take_locks) 2096 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 2097 drm_for_each_connector_iter(connector, &conn_iter) 2098 drm_atomic_connector_print_state(p, connector->state); 2099 if (take_locks) 2100 drm_modeset_unlock(&dev->mode_config.connection_mutex); 2101 drm_connector_list_iter_end(&conn_iter); 2102 } 2103 2104 /** 2105 * drm_state_dump - dump entire device atomic state 2106 * @dev: the drm device 2107 * @p: where to print the state to 2108 * 2109 * Just for debugging. Drivers might want an option to dump state 2110 * to dmesg in case of error irq's. (Hint, you probably want to 2111 * ratelimit this!) 2112 * 2113 * The caller must drm_modeset_lock_all(), or if this is called 2114 * from error irq handler, it should not be enabled by default. 2115 * (Ie. if you are debugging errors you might not care that this 2116 * is racey. But calling this without all modeset locks held is 2117 * not inherently safe.) 
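 *
 * A minimal sketch of a rate-limited dump from an error interrupt handler
 * (hypothetical driver code, not taken from this file):
 *
 *	static void foo_handle_underrun(struct drm_device *dev)
 *	{
 *		static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
 *					      DEFAULT_RATELIMIT_BURST);
 *		struct drm_printer p = drm_info_printer(dev->dev);
 *
 *		if (__ratelimit(&rs))
 *			drm_state_dump(dev, &p);
 *	}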
2118 */ 2119 void drm_state_dump(struct drm_device *dev, struct drm_printer *p) 2120 { 2121 __drm_state_dump(dev, p, false); 2122 } 2123 EXPORT_SYMBOL(drm_state_dump); 2124 2125 #ifdef CONFIG_DEBUG_FS 2126 static int drm_state_info(struct seq_file *m, void *data) 2127 { 2128 struct drm_info_node *node = (struct drm_info_node *) m->private; 2129 struct drm_device *dev = node->minor->dev; 2130 struct drm_printer p = drm_seq_file_printer(m); 2131 2132 __drm_state_dump(dev, &p, true); 2133 2134 return 0; 2135 } 2136 2137 /* any use in debugfs files to dump individual planes/crtc/etc? */ 2138 static const struct drm_info_list drm_atomic_debugfs_list[] = { 2139 {"state", drm_state_info, 0}, 2140 }; 2141 2142 int drm_atomic_debugfs_init(struct drm_minor *minor) 2143 { 2144 return drm_debugfs_create_files(drm_atomic_debugfs_list, 2145 ARRAY_SIZE(drm_atomic_debugfs_list), 2146 minor->debugfs_root, minor); 2147 } 2148 #endif 2149 2150 /* 2151 * The big monster ioctl 2152 */ 2153 2154 static struct drm_pending_vblank_event *create_vblank_event( 2155 struct drm_crtc *crtc, uint64_t user_data) 2156 { 2157 struct drm_pending_vblank_event *e = NULL; 2158 2159 e = kzalloc(sizeof *e, GFP_KERNEL); 2160 if (!e) 2161 return NULL; 2162 2163 e->event.base.type = DRM_EVENT_FLIP_COMPLETE; 2164 e->event.base.length = sizeof(e->event); 2165 e->event.vbl.crtc_id = crtc->base.id; 2166 e->event.vbl.user_data = user_data; 2167 2168 return e; 2169 } 2170 2171 int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, 2172 struct drm_connector *connector, 2173 int mode) 2174 { 2175 struct drm_connector *tmp_connector; 2176 struct drm_connector_state *new_conn_state; 2177 struct drm_crtc *crtc; 2178 struct drm_crtc_state *crtc_state; 2179 int i, ret, old_mode = connector->dpms; 2180 bool active = false; 2181 2182 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, 2183 state->acquire_ctx); 2184 if (ret) 2185 return ret; 2186 2187 if (mode != DRM_MODE_DPMS_ON) 2188 mode = DRM_MODE_DPMS_OFF; 2189 connector->dpms = mode; 2190 2191 crtc = connector->state->crtc; 2192 if (!crtc) 2193 goto out; 2194 ret = drm_atomic_add_affected_connectors(state, crtc); 2195 if (ret) 2196 goto out; 2197 2198 crtc_state = drm_atomic_get_crtc_state(state, crtc); 2199 if (IS_ERR(crtc_state)) { 2200 ret = PTR_ERR(crtc_state); 2201 goto out; 2202 } 2203 2204 for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) { 2205 if (new_conn_state->crtc != crtc) 2206 continue; 2207 if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { 2208 active = true; 2209 break; 2210 } 2211 } 2212 2213 crtc_state->active = active; 2214 ret = drm_atomic_commit(state); 2215 out: 2216 if (ret != 0) 2217 connector->dpms = old_mode; 2218 return ret; 2219 } 2220 2221 int drm_atomic_set_property(struct drm_atomic_state *state, 2222 struct drm_mode_object *obj, 2223 struct drm_property *prop, 2224 uint64_t prop_value) 2225 { 2226 struct drm_mode_object *ref; 2227 int ret; 2228 2229 if (!drm_property_change_valid_get(prop, prop_value, &ref)) 2230 return -EINVAL; 2231 2232 switch (obj->type) { 2233 case DRM_MODE_OBJECT_CONNECTOR: { 2234 struct drm_connector *connector = obj_to_connector(obj); 2235 struct drm_connector_state *connector_state; 2236 2237 connector_state = drm_atomic_get_connector_state(state, connector); 2238 if (IS_ERR(connector_state)) { 2239 ret = PTR_ERR(connector_state); 2240 break; 2241 } 2242 2243 ret = drm_atomic_connector_set_property(connector, 2244 connector_state, prop, prop_value); 2245 break; 2246 } 2247 case 
DRM_MODE_OBJECT_CRTC: {
2248 struct drm_crtc *crtc = obj_to_crtc(obj);
2249 struct drm_crtc_state *crtc_state;
2250
2251 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2252 if (IS_ERR(crtc_state)) {
2253 ret = PTR_ERR(crtc_state);
2254 break;
2255 }
2256
2257 ret = drm_atomic_crtc_set_property(crtc,
2258 crtc_state, prop, prop_value);
2259 break;
2260 }
2261 case DRM_MODE_OBJECT_PLANE: {
2262 struct drm_plane *plane = obj_to_plane(obj);
2263 struct drm_plane_state *plane_state;
2264
2265 plane_state = drm_atomic_get_plane_state(state, plane);
2266 if (IS_ERR(plane_state)) {
2267 ret = PTR_ERR(plane_state);
2268 break;
2269 }
2270
2271 ret = drm_atomic_plane_set_property(plane,
2272 plane_state, prop, prop_value);
2273 break;
2274 }
2275 default:
2276 ret = -EINVAL;
2277 break;
2278 }
2279
2280 drm_property_change_valid_put(prop, ref);
2281 return ret;
2282 }
2283
2284 /**
2285 * DOC: explicit fencing properties
2286 *
2287 * Explicit fencing allows userspace to control the buffer synchronization
2288 * between devices. A fence or a group of fences is transferred to/from
2289 * userspace using Sync File fds, and there are two DRM properties for that:
2290 * IN_FENCE_FD on each DRM Plane to send fences to the kernel, and
2291 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
2292 *
2293 * In contrast, with implicit fencing the kernel keeps track of any
2294 * ongoing rendering, and automatically ensures that the atomic update waits
2295 * for any pending rendering to complete. For shared buffers represented with
2296 * a &struct dma_buf this is tracked in &struct reservation_object.
2297 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
2298 * whereas explicit fencing is what Android wants.
2299 *
2300 * "IN_FENCE_FD":
2301 * Use this property to pass a fence that DRM should wait on before
2302 * proceeding with the Atomic Commit request and show the framebuffer for
2303 * the plane on the screen. The fence can be either a normal fence or a
2304 * merged one; the sync_file framework will handle both cases and use a
2305 * fence_array if a merged fence is received. Passing -1 here means no
2306 * fences to wait on.
2307 *
2308 * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
2309 * it will only check if the Sync File is a valid one.
2310 *
2311 * On the driver side the fence is stored in the &drm_plane_state.fence
2312 * field. Drivers which also support implicit fencing
2313 * should set the implicit fence using drm_atomic_set_fence_for_plane(),
2314 * so that the precedence of implicit vs. explicit fencing is consistent
2315 * across drivers.
2316 *
2317 * "OUT_FENCE_PTR":
2318 * Use this property to pass a file descriptor pointer to DRM. Once the
2319 * Atomic Commit request call returns, OUT_FENCE_PTR will be filled with
2320 * the file descriptor number of a Sync File. This Sync File contains the
2321 * CRTC fence that will be signaled when all framebuffers present on the
2322 * Atomic Commit request for that given CRTC are scanned out on the
2323 * screen.
2324 *
2325 * The Atomic Commit request fails if an invalid pointer is passed. If the
2326 * Atomic Commit request fails for any other reason the out fence fd
2327 * returned will be -1. On an Atomic Commit with the
2328 * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
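 *
 * A minimal userspace sketch using the libdrm atomic helpers (the property
 * IDs are assumed to have been looked up beforehand; variable names are
 * illustrative only):
 *
 *	int out_fence_fd = -1;
 *	drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop, render_fence_fd);
 *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
 *				 (uint64_t)(uintptr_t)&out_fence_fd);
 *	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 *	drmModeAtomicFree(req);
 *	/* on success out_fence_fd now holds a Sync File fd for this CRTC */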
2329 * 2330 * Note that out-fences don't have a special interface to drivers and are 2331 * internally represented by a &struct drm_pending_vblank_event in struct 2332 * &drm_crtc_state, which is also used by the nonblocking atomic commit 2333 * helpers and for the DRM event handling for existing userspace. 2334 */ 2335 2336 struct drm_out_fence_state { 2337 s32 __user *out_fence_ptr; 2338 struct sync_file *sync_file; 2339 int fd; 2340 }; 2341 2342 static int setup_out_fence(struct drm_out_fence_state *fence_state, 2343 struct dma_fence *fence) 2344 { 2345 fence_state->fd = get_unused_fd_flags(O_CLOEXEC); 2346 if (fence_state->fd < 0) 2347 return fence_state->fd; 2348 2349 if (put_user(fence_state->fd, fence_state->out_fence_ptr)) 2350 return -EFAULT; 2351 2352 fence_state->sync_file = sync_file_create(fence); 2353 if (!fence_state->sync_file) 2354 return -ENOMEM; 2355 2356 return 0; 2357 } 2358 2359 static int prepare_signaling(struct drm_device *dev, 2360 struct drm_atomic_state *state, 2361 struct drm_mode_atomic *arg, 2362 struct drm_file *file_priv, 2363 struct drm_out_fence_state **fence_state, 2364 unsigned int *num_fences) 2365 { 2366 struct drm_crtc *crtc; 2367 struct drm_crtc_state *crtc_state; 2368 struct drm_connector *conn; 2369 struct drm_connector_state *conn_state; 2370 int i, c = 0, ret; 2371 2372 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) 2373 return 0; 2374 2375 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2376 s32 __user *fence_ptr; 2377 2378 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); 2379 2380 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { 2381 struct drm_pending_vblank_event *e; 2382 2383 e = create_vblank_event(crtc, arg->user_data); 2384 if (!e) 2385 return -ENOMEM; 2386 2387 crtc_state->event = e; 2388 } 2389 2390 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { 2391 struct drm_pending_vblank_event *e = crtc_state->event; 2392 2393 if (!file_priv) 2394 continue; 2395 2396 ret = drm_event_reserve_init(dev, file_priv, &e->base, 2397 &e->event.base); 2398 if (ret) { 2399 kfree(e); 2400 crtc_state->event = NULL; 2401 return ret; 2402 } 2403 } 2404 2405 if (fence_ptr) { 2406 struct dma_fence *fence; 2407 struct drm_out_fence_state *f; 2408 2409 f = krealloc(*fence_state, sizeof(**fence_state) * 2410 (*num_fences + 1), GFP_KERNEL); 2411 if (!f) 2412 return -ENOMEM; 2413 2414 memset(&f[*num_fences], 0, sizeof(*f)); 2415 2416 f[*num_fences].out_fence_ptr = fence_ptr; 2417 *fence_state = f; 2418 2419 fence = drm_crtc_create_fence(crtc); 2420 if (!fence) 2421 return -ENOMEM; 2422 2423 ret = setup_out_fence(&f[(*num_fences)++], fence); 2424 if (ret) { 2425 dma_fence_put(fence); 2426 return ret; 2427 } 2428 2429 crtc_state->event->base.fence = fence; 2430 } 2431 2432 c++; 2433 } 2434 2435 for_each_new_connector_in_state(state, conn, conn_state, i) { 2436 struct drm_writeback_connector *wb_conn; 2437 struct drm_writeback_job *job; 2438 struct drm_out_fence_state *f; 2439 struct dma_fence *fence; 2440 s32 __user *fence_ptr; 2441 2442 fence_ptr = get_out_fence_for_connector(state, conn); 2443 if (!fence_ptr) 2444 continue; 2445 2446 job = drm_atomic_get_writeback_job(conn_state); 2447 if (!job) 2448 return -ENOMEM; 2449 2450 f = krealloc(*fence_state, sizeof(**fence_state) * 2451 (*num_fences + 1), GFP_KERNEL); 2452 if (!f) 2453 return -ENOMEM; 2454 2455 memset(&f[*num_fences], 0, sizeof(*f)); 2456 2457 f[*num_fences].out_fence_ptr = fence_ptr; 2458 *fence_state = f; 2459 2460 wb_conn = drm_connector_to_writeback(conn); 2461 fence = 
drm_writeback_get_out_fence(wb_conn); 2462 if (!fence) 2463 return -ENOMEM; 2464 2465 ret = setup_out_fence(&f[(*num_fences)++], fence); 2466 if (ret) { 2467 dma_fence_put(fence); 2468 return ret; 2469 } 2470 2471 job->out_fence = fence; 2472 } 2473 2474 /* 2475 * Having this flag means user mode pends on event which will never 2476 * reach due to lack of at least one CRTC for signaling 2477 */ 2478 if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2479 return -EINVAL; 2480 2481 return 0; 2482 } 2483 2484 static void complete_signaling(struct drm_device *dev, 2485 struct drm_atomic_state *state, 2486 struct drm_out_fence_state *fence_state, 2487 unsigned int num_fences, 2488 bool install_fds) 2489 { 2490 struct drm_crtc *crtc; 2491 struct drm_crtc_state *crtc_state; 2492 int i; 2493 2494 if (install_fds) { 2495 for (i = 0; i < num_fences; i++) 2496 fd_install(fence_state[i].fd, 2497 fence_state[i].sync_file->file); 2498 2499 kfree(fence_state); 2500 return; 2501 } 2502 2503 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2504 struct drm_pending_vblank_event *event = crtc_state->event; 2505 /* 2506 * Free the allocated event. drm_atomic_helper_setup_commit 2507 * can allocate an event too, so only free it if it's ours 2508 * to prevent a double free in drm_atomic_state_clear. 2509 */ 2510 if (event && (event->base.fence || event->base.file_priv)) { 2511 drm_event_cancel_free(dev, &event->base); 2512 crtc_state->event = NULL; 2513 } 2514 } 2515 2516 if (!fence_state) 2517 return; 2518 2519 for (i = 0; i < num_fences; i++) { 2520 if (fence_state[i].sync_file) 2521 fput(fence_state[i].sync_file->file); 2522 if (fence_state[i].fd >= 0) 2523 put_unused_fd(fence_state[i].fd); 2524 2525 /* If this fails log error to the user */ 2526 if (fence_state[i].out_fence_ptr && 2527 put_user(-1, fence_state[i].out_fence_ptr)) 2528 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n"); 2529 } 2530 2531 kfree(fence_state); 2532 } 2533 2534 int drm_mode_atomic_ioctl(struct drm_device *dev, 2535 void *data, struct drm_file *file_priv) 2536 { 2537 struct drm_mode_atomic *arg = data; 2538 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr); 2539 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr); 2540 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); 2541 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr); 2542 unsigned int copied_objs, copied_props; 2543 struct drm_atomic_state *state; 2544 struct drm_modeset_acquire_ctx ctx; 2545 struct drm_out_fence_state *fence_state; 2546 int ret = 0; 2547 unsigned int i, j, num_fences; 2548 2549 /* disallow for drivers not supporting atomic: */ 2550 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 2551 return -EINVAL; 2552 2553 /* disallow for userspace that has not enabled atomic cap (even 2554 * though this may be a bit overkill, since legacy userspace 2555 * wouldn't know how to call this ioctl) 2556 */ 2557 if (!file_priv->atomic) 2558 return -EINVAL; 2559 2560 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) 2561 return -EINVAL; 2562 2563 if (arg->reserved) 2564 return -EINVAL; 2565 2566 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) && 2567 !dev->mode_config.async_page_flip) 2568 return -EINVAL; 2569 2570 /* can't test and expect an event at the same time. 
*/ 2571 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && 2572 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2573 return -EINVAL; 2574 2575 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 2576 2577 state = drm_atomic_state_alloc(dev); 2578 if (!state) 2579 return -ENOMEM; 2580 2581 state->acquire_ctx = &ctx; 2582 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 2583 2584 retry: 2585 copied_objs = 0; 2586 copied_props = 0; 2587 fence_state = NULL; 2588 num_fences = 0; 2589 2590 for (i = 0; i < arg->count_objs; i++) { 2591 uint32_t obj_id, count_props; 2592 struct drm_mode_object *obj; 2593 2594 if (get_user(obj_id, objs_ptr + copied_objs)) { 2595 ret = -EFAULT; 2596 goto out; 2597 } 2598 2599 obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); 2600 if (!obj) { 2601 ret = -ENOENT; 2602 goto out; 2603 } 2604 2605 if (!obj->properties) { 2606 drm_mode_object_put(obj); 2607 ret = -ENOENT; 2608 goto out; 2609 } 2610 2611 if (get_user(count_props, count_props_ptr + copied_objs)) { 2612 drm_mode_object_put(obj); 2613 ret = -EFAULT; 2614 goto out; 2615 } 2616 2617 copied_objs++; 2618 2619 for (j = 0; j < count_props; j++) { 2620 uint32_t prop_id; 2621 uint64_t prop_value; 2622 struct drm_property *prop; 2623 2624 if (get_user(prop_id, props_ptr + copied_props)) { 2625 drm_mode_object_put(obj); 2626 ret = -EFAULT; 2627 goto out; 2628 } 2629 2630 prop = drm_mode_obj_find_prop_id(obj, prop_id); 2631 if (!prop) { 2632 drm_mode_object_put(obj); 2633 ret = -ENOENT; 2634 goto out; 2635 } 2636 2637 if (copy_from_user(&prop_value, 2638 prop_values_ptr + copied_props, 2639 sizeof(prop_value))) { 2640 drm_mode_object_put(obj); 2641 ret = -EFAULT; 2642 goto out; 2643 } 2644 2645 ret = drm_atomic_set_property(state, obj, prop, 2646 prop_value); 2647 if (ret) { 2648 drm_mode_object_put(obj); 2649 goto out; 2650 } 2651 2652 copied_props++; 2653 } 2654 2655 drm_mode_object_put(obj); 2656 } 2657 2658 ret = prepare_signaling(dev, state, arg, file_priv, &fence_state, 2659 &num_fences); 2660 if (ret) 2661 goto out; 2662 2663 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { 2664 ret = drm_atomic_check_only(state); 2665 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { 2666 ret = drm_atomic_nonblocking_commit(state); 2667 } else { 2668 if (unlikely(drm_debug & DRM_UT_STATE)) 2669 drm_atomic_print_state(state); 2670 2671 ret = drm_atomic_commit(state); 2672 } 2673 2674 out: 2675 complete_signaling(dev, state, fence_state, num_fences, !ret); 2676 2677 if (ret == -EDEADLK) { 2678 drm_atomic_state_clear(state); 2679 ret = drm_modeset_backoff(&ctx); 2680 if (!ret) 2681 goto retry; 2682 } 2683 2684 drm_atomic_state_put(state); 2685 2686 drm_modeset_drop_locks(&ctx); 2687 drm_modeset_acquire_fini(&ctx); 2688 2689 return ret; 2690 } 2691