/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */


#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
	kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;

	if (!config->funcs->atomic_state_alloc) {
		struct drm_atomic_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		state->connectors[i].old_state = NULL;
		state->connectors[i].new_state = NULL;
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
		state->crtcs[i].old_state = NULL;
		state->crtcs[i].new_state = NULL;

		if (state->crtcs[i].commit) {
			drm_crtc_commit_put(state->crtcs[i].commit);
			state->crtcs[i].commit = NULL;
		}
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
		state->planes[i].old_state = NULL;
		state->planes[i].new_state = NULL;
	}

	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
		state->private_objs[i].old_state = NULL;
		state->private_objs[i].new_state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);

/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
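 *
 * As a rough sketch only (the acquire context @ctx and the retry label belong
 * to the caller, they are not part of this API), the usual deadlock backoff
 * dance around this call looks like::
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	state->acquire_ctx = &ctx;
 * retry:
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state)) {
 *		ret = PTR_ERR(crtc_state);
 *		if (ret == -EDEADLK) {
 *			drm_atomic_state_clear(state);
 *			drm_modeset_backoff(&ctx);
 *			goto retry;
 *		}
 *	}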
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].old_state = crtc->state;
	state->crtcs[index].new_state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);

static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
				 const struct drm_crtc_state *new_crtc_state)
{
	struct drm_crtc *crtc = new_crtc_state->crtc;

	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (new_crtc_state->active && !new_crtc_state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (new_crtc_state->event &&
	    !new_crtc_state->active && !old_crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
					const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}

static int drm_atomic_connector_check(struct drm_connector *connector,
				      struct drm_connector_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_writeback_job *writeback_job = state->writeback_job;
	const struct drm_display_info *info = &connector->display_info;

	state->max_bpc = info->bpc ? info->bpc : 8;
	if (connector->max_bpc_property)
		state->max_bpc = min(state->max_bpc, state->max_requested_bpc);

	if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
		return 0;

	if (writeback_job->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	if (state->crtc)
		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);

	if (writeback_job->fb && !crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
				 connector->base.id, connector->name,
				 state->crtc->base.id);
		return -EINVAL;
	}

	if (writeback_job->out_fence && !writeback_job->fb) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	/* the legacy pointers should never be set */
	WARN_ON(plane->fb);
	WARN_ON(plane->old_fb);
	WARN_ON(plane->crtc);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);

static bool
plane_switching_crtc(const struct drm_plane_state *old_plane_state,
		     const struct drm_plane_state *new_plane_state)
{
	if (!old_plane_state->crtc || !new_plane_state->crtc)
		return false;

	if (old_plane_state->crtc == new_plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @old_plane_state: old plane state to check
 * @new_plane_state: new plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
				  const struct drm_plane_state *new_plane_state)
{
	struct drm_plane *plane = new_plane_state->plane;
	struct drm_crtc *crtc = new_plane_state->crtc;
	const struct drm_framebuffer *fb = new_plane_state->fb;
	unsigned int fb_width, fb_height;
	struct drm_mode_rect *clips;
	uint32_t num_clips;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (crtc && !fb) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	} else if (fb && !crtc) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
		DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
				 crtc->base.id, crtc->name,
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format.
	 */
	ret = drm_plane_check_pixel_format(plane, fb->format->format,
					   fb->modifier);
	if (ret) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
				 plane->base.id, plane->name,
				 drm_get_format_name(fb->format->format,
						     &format_name),
				 fb->modifier);
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (new_plane_state->crtc_w > INT_MAX ||
	    new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
	    new_plane_state->crtc_h > INT_MAX ||
	    new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
				 plane->base.id, plane->name,
				 new_plane_state->crtc_w, new_plane_state->crtc_h,
				 new_plane_state->crtc_x, new_plane_state->crtc_y);
		return -ERANGE;
	}

	fb_width = fb->width << 16;
	fb_height = fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (new_plane_state->src_w > fb_width ||
	    new_plane_state->src_x > fb_width - new_plane_state->src_w ||
	    new_plane_state->src_h > fb_height ||
	    new_plane_state->src_y > fb_height - new_plane_state->src_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
				 plane->base.id, plane->name,
				 new_plane_state->src_w >> 16,
				 ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
				 new_plane_state->src_h >> 16,
				 ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
				 new_plane_state->src_x >> 16,
				 ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
				 new_plane_state->src_y >> 16,
				 ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
				 fb->width, fb->height);
		return -ENOSPC;
	}

	clips = drm_plane_get_damage_clips(new_plane_state);
	num_clips = drm_plane_get_damage_clips_count(new_plane_state);

	/* Make sure damage clips are valid and inside the fb. */
	while (num_clips > 0) {
		if (clips->x1 >= clips->x2 ||
		    clips->y1 >= clips->y2 ||
		    clips->x1 < 0 ||
		    clips->y1 < 0 ||
		    clips->x2 > fb_width ||
		    clips->y2 > fb_height) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
					 plane->base.id, plane->name, clips->x1,
					 clips->y1, clips->x2, clips->y2);
			return -EINVAL;
		}
		clips++;
		num_clips--;
	}

	if (plane_switching_crtc(old_plane_state, new_plane_state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_plane_print_state(struct drm_printer *p,
					 const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ?
		   state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}

/**
 * DOC: handling driver private state
 *
 * Very often the DRM objects exposed to userspace in the atomic modeset api
 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
 * underlying hardware. Especially for any kind of shared resources (e.g. shared
 * clocks, scaler units, bandwidth and fifo limits shared among a group of
 * planes or CRTCs, and so on) it makes sense to model these as independent
 * objects. Drivers then need to do similar state tracking and commit ordering for
 * such private (since not exposed to userspace) objects as the atomic core and
 * helpers already provide for connectors, planes and CRTCs.
 *
 * To make this easier on drivers the atomic core provides some support to track
 * driver private state objects using struct &drm_private_obj, with the
 * associated state struct &drm_private_state.
 *
 * Similar to userspace-exposed objects, private state structures can be
 * acquired by calling drm_atomic_get_private_obj_state(). Since this function
 * does not take care of locking, drivers should wrap it for each type of
 * private state object they have with the required call to drm_modeset_lock()
 * for the corresponding &drm_modeset_lock.
 *
 * All private state structures contained in a &drm_atomic_state update can be
 * iterated using for_each_oldnew_private_obj_in_state(),
 * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
 * Drivers are recommended to wrap these for each type of driver private state
 * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
 * least if they want to iterate over all objects of a given type.
 *
 * An earlier way to handle driver private state was by subclassing struct
 * &drm_atomic_state. But since that encourages non-standard ways to implement
 * the check/commit split atomic requires (by using e.g. "check and rollback or
 * commit instead" of "duplicate state, check, then either commit or release
 * duplicated state") it is deprecated in favour of using &drm_private_state.
 */

/**
 * drm_atomic_private_obj_init - initialize private object
 * @dev: DRM device this object will be attached to
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
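 *
 * As a rough sketch (the foo_* names are purely illustrative and not taken
 * from any real driver), a driver with some shared resource could embed the
 * object and its state like this, and call drm_atomic_private_obj_init() once
 * at driver init time after allocating the initial state::
 *
 *	struct foo_shared_state {
 *		struct drm_private_state base;
 *		unsigned int resources_in_use;
 *	};
 *
 *	struct foo_device {
 *		struct drm_device drm;
 *		struct drm_private_obj shared_obj;
 *	};
 *
 *	drm_atomic_private_obj_init(&foo->drm, &foo->shared_obj,
 *				    &shared->base, &foo_shared_state_funcs);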
 */
void
drm_atomic_private_obj_init(struct drm_device *dev,
			    struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	drm_modeset_lock_init(&obj->lock);

	obj->state = state;
	obj->funcs = funcs;
	list_add_tail(&obj->head, &dev->mode_config.privobj_list);
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	list_del(&obj->head);
	obj->funcs->atomic_destroy_state(obj, obj->state);
	drm_modeset_lock_fini(&obj->lock);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It will also grab the relevant private
 * object lock to make sure that the state is consistent.
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
 */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i, ret;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);

static void drm_atomic_connector_print_state(struct drm_printer *p,
					     const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		if (state->writeback_job && state->writeback_job->fb)
			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc. Hence
 * drivers and helpers should only call this when really needed (e.g. when a
 * full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

/**
 * drm_atomic_add_affected_planes - add planes for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any) adding all the plane states for
 * a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	const struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_plane *plane;

	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

	DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}
	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);

/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
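 *
 * A minimal sketch of how a caller might probe a configuration without
 * committing it (locking, error handling and the deadlock backoff dance are
 * omitted for brevity)::
 *
 *	state = drm_atomic_state_alloc(dev);
 *	state->acquire_ctx = &ctx;
 *
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	crtc_state->active = true;
 *
 *	ret = drm_atomic_check_only(state);
 *	drm_atomic_state_put(state);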
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	struct drm_plane_state *new_plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_connector_check(conn, conn_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
					 conn->base.id, conn->name);
			return ret;
		}
	}

	if (config->funcs->atomic_check) {
		ret = config->funcs->atomic_check(state->dev, state);

		if (ret) {
			DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
					 state, ret);
			return ret;
		}
	}

	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);

/**
 * drm_atomic_nonblocking_commit - atomic nonblocking commit
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);

	return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);

void drm_atomic_print_state(const struct drm_atomic_state *state)
{
	struct drm_printer p = drm_info_printer(state->dev->dev);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i)
		drm_atomic_plane_print_state(&p, plane_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_atomic_crtc_print_state(&p, crtc_state);

	for_each_new_connector_in_state(state, connector, connector_state, i)
		drm_atomic_connector_print_state(&p, connector_state);
}

static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
			     bool take_locks)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!drm_drv_uses_atomic_modeset(dev))
		return;

	list_for_each_entry(plane, &config->plane_list, head) {
		if (take_locks)
			drm_modeset_lock(&plane->mutex, NULL);
		drm_atomic_plane_print_state(p, plane->state);
		if (take_locks)
			drm_modeset_unlock(&plane->mutex);
	}

	list_for_each_entry(crtc, &config->crtc_list, head) {
		if (take_locks)
			drm_modeset_lock(&crtc->mutex, NULL);
		drm_atomic_crtc_print_state(p, crtc->state);
		if (take_locks)
			drm_modeset_unlock(&crtc->mutex);
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	if (take_locks)
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_for_each_connector_iter(connector, &conn_iter)
		drm_atomic_connector_print_state(p, connector->state);
	if (take_locks)
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	drm_connector_list_iter_end(&conn_iter);
}

/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error IRQs. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must hold all modeset locks (drm_modeset_lock_all()), or, if this
 * is called from an error irq handler, it should not be enabled by default.
 * (I.e. if you are debugging errors you might not care that this
 * is racy. But calling this without all modeset locks held is
 * not inherently safe.)
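 *
 * A short sketch of a debug dump to dmesg (whether and how to take the locks
 * depends on the calling context, as described above)::
 *
 *	struct drm_printer p = drm_info_printer(dev->dev);
 *
 *	drm_modeset_lock_all(dev);
 *	drm_state_dump(dev, &p);
 *	drm_modeset_unlock_all(dev);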
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
	__drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);

#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	__drm_state_dump(dev, &p, true);

	return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

int drm_atomic_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(drm_atomic_debugfs_list,
					ARRAY_SIZE(drm_atomic_debugfs_list),
					minor->debugfs_root, minor);
}
#endif