1 /* 2 * Copyright (C) 2014 Red Hat 3 * Copyright (C) 2014 Intel Corp. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Rob Clark <robdclark@gmail.com> 25 * Daniel Vetter <daniel.vetter@ffwll.ch> 26 */ 27 28 #include <linux/export.h> 29 #include <linux/dma-fence.h> 30 #include <linux/ktime.h> 31 32 #include <drm/drm_atomic.h> 33 #include <drm/drm_atomic_helper.h> 34 #include <drm/drm_atomic_uapi.h> 35 #include <drm/drm_blend.h> 36 #include <drm/drm_bridge.h> 37 #include <drm/drm_colorop.h> 38 #include <drm/drm_damage_helper.h> 39 #include <drm/drm_device.h> 40 #include <drm/drm_drv.h> 41 #include <drm/drm_framebuffer.h> 42 #include <drm/drm_gem_atomic_helper.h> 43 #include <drm/drm_panic.h> 44 #include <drm/drm_print.h> 45 #include <drm/drm_self_refresh_helper.h> 46 #include <drm/drm_vblank.h> 47 #include <drm/drm_writeback.h> 48 49 #include "drm_crtc_helper_internal.h" 50 #include "drm_crtc_internal.h" 51 52 /** 53 * DOC: overview 54 * 55 * This helper library provides implementations of check and commit functions on 56 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It 57 * also provides convenience implementations for the atomic state handling 58 * callbacks for drivers which don't need to subclass the drm core structures to 59 * add their own additional internal state. 60 * 61 * This library also provides default implementations for the check callback in 62 * drm_atomic_helper_check() and for the commit callback with 63 * drm_atomic_helper_commit(). But the individual stages and callbacks are 64 * exposed to allow drivers to mix and match and e.g. use the plane helpers only 65 * together with a driver private modeset implementation. 66 * 67 * This library also provides implementations for all the legacy driver 68 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(), 69 * drm_atomic_helper_disable_plane(), and the various functions to implement 70 * set_property callbacks. New drivers must not implement these functions 71 * themselves but must use the provided helpers. 72 * 73 * The atomic helper uses the same function table structures as all other 74 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs, 75 * struct &drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It 76 * also shares the &struct drm_plane_helper_funcs function table with the plane 77 * helpers. 
78 */ 79 static void 80 drm_atomic_helper_plane_changed(struct drm_atomic_state *state, 81 struct drm_plane_state *old_plane_state, 82 struct drm_plane_state *plane_state, 83 struct drm_plane *plane) 84 { 85 struct drm_crtc_state *crtc_state; 86 87 if (old_plane_state->crtc) { 88 crtc_state = drm_atomic_get_new_crtc_state(state, 89 old_plane_state->crtc); 90 91 if (WARN_ON(!crtc_state)) 92 return; 93 94 crtc_state->planes_changed = true; 95 } 96 97 if (plane_state->crtc) { 98 crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); 99 100 if (WARN_ON(!crtc_state)) 101 return; 102 103 crtc_state->planes_changed = true; 104 } 105 } 106 107 static int handle_conflicting_encoders(struct drm_atomic_state *state, 108 bool disable_conflicting_encoders) 109 { 110 struct drm_connector_state *new_conn_state; 111 struct drm_connector *connector; 112 struct drm_connector_list_iter conn_iter; 113 struct drm_encoder *encoder; 114 unsigned int encoder_mask = 0; 115 int i, ret = 0; 116 117 /* 118 * First loop, find all newly assigned encoders from the connectors 119 * part of the state. If the same encoder is assigned to multiple 120 * connectors bail out. 121 */ 122 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 123 const struct drm_connector_helper_funcs *funcs = connector->helper_private; 124 struct drm_encoder *new_encoder; 125 126 if (!new_conn_state->crtc) 127 continue; 128 129 if (funcs->atomic_best_encoder) 130 new_encoder = funcs->atomic_best_encoder(connector, 131 state); 132 else if (funcs->best_encoder) 133 new_encoder = funcs->best_encoder(connector); 134 else 135 new_encoder = drm_connector_get_single_encoder(connector); 136 137 if (new_encoder) { 138 if (encoder_mask & drm_encoder_mask(new_encoder)) { 139 drm_dbg_atomic(connector->dev, 140 "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n", 141 new_encoder->base.id, new_encoder->name, 142 connector->base.id, connector->name); 143 144 return -EINVAL; 145 } 146 147 encoder_mask |= drm_encoder_mask(new_encoder); 148 } 149 } 150 151 if (!encoder_mask) 152 return 0; 153 154 /* 155 * Second loop, iterate over all connectors not part of the state. 156 * 157 * If a conflicting encoder is found and disable_conflicting_encoders 158 * is not set, an error is returned. Userspace can provide a solution 159 * through the atomic ioctl. 160 * 161 * If the flag is set conflicting connectors are removed from the CRTC 162 * and the CRTC is disabled if no encoder is left. This preserves 163 * compatibility with the legacy set_config behavior. 
164 */ 165 drm_connector_list_iter_begin(state->dev, &conn_iter); 166 drm_for_each_connector_iter(connector, &conn_iter) { 167 struct drm_crtc_state *crtc_state; 168 169 if (drm_atomic_get_new_connector_state(state, connector)) 170 continue; 171 172 encoder = connector->state->best_encoder; 173 if (!encoder || !(encoder_mask & drm_encoder_mask(encoder))) 174 continue; 175 176 if (!disable_conflicting_encoders) { 177 drm_dbg_atomic(connector->dev, 178 "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n", 179 encoder->base.id, encoder->name, 180 connector->state->crtc->base.id, 181 connector->state->crtc->name, 182 connector->base.id, connector->name); 183 ret = -EINVAL; 184 goto out; 185 } 186 187 new_conn_state = drm_atomic_get_connector_state(state, connector); 188 if (IS_ERR(new_conn_state)) { 189 ret = PTR_ERR(new_conn_state); 190 goto out; 191 } 192 193 drm_dbg_atomic(connector->dev, 194 "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n", 195 encoder->base.id, encoder->name, 196 new_conn_state->crtc->base.id, new_conn_state->crtc->name, 197 connector->base.id, connector->name); 198 199 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); 200 201 ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL); 202 if (ret) 203 goto out; 204 205 if (!crtc_state->connector_mask) { 206 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, 207 NULL); 208 if (ret < 0) 209 goto out; 210 211 crtc_state->active = false; 212 } 213 } 214 out: 215 drm_connector_list_iter_end(&conn_iter); 216 217 return ret; 218 } 219 220 static void 221 set_best_encoder(struct drm_atomic_state *state, 222 struct drm_connector_state *conn_state, 223 struct drm_encoder *encoder) 224 { 225 struct drm_crtc_state *crtc_state; 226 struct drm_crtc *crtc; 227 228 if (conn_state->best_encoder) { 229 /* Unset the encoder_mask in the old crtc state. */ 230 crtc = conn_state->connector->state->crtc; 231 232 /* A NULL crtc is an error here because we should have 233 * duplicated a NULL best_encoder when crtc was NULL. 234 * As an exception restoring duplicated atomic state 235 * during resume is allowed, so don't warn when 236 * best_encoder is equal to encoder we intend to set. 
237 */ 238 WARN_ON(!crtc && encoder != conn_state->best_encoder); 239 if (crtc) { 240 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 241 242 crtc_state->encoder_mask &= 243 ~drm_encoder_mask(conn_state->best_encoder); 244 } 245 } 246 247 if (encoder) { 248 crtc = conn_state->crtc; 249 WARN_ON(!crtc); 250 if (crtc) { 251 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 252 253 crtc_state->encoder_mask |= 254 drm_encoder_mask(encoder); 255 } 256 } 257 258 conn_state->best_encoder = encoder; 259 } 260 261 static void 262 steal_encoder(struct drm_atomic_state *state, 263 struct drm_encoder *encoder) 264 { 265 struct drm_crtc_state *crtc_state; 266 struct drm_connector *connector; 267 struct drm_connector_state *old_connector_state, *new_connector_state; 268 int i; 269 270 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { 271 struct drm_crtc *encoder_crtc; 272 273 if (new_connector_state->best_encoder != encoder) 274 continue; 275 276 encoder_crtc = old_connector_state->crtc; 277 278 drm_dbg_atomic(encoder->dev, 279 "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n", 280 encoder->base.id, encoder->name, 281 encoder_crtc->base.id, encoder_crtc->name); 282 283 set_best_encoder(state, new_connector_state, NULL); 284 285 crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc); 286 crtc_state->connectors_changed = true; 287 288 return; 289 } 290 } 291 292 static int 293 update_connector_routing(struct drm_atomic_state *state, 294 struct drm_connector *connector, 295 struct drm_connector_state *old_connector_state, 296 struct drm_connector_state *new_connector_state, 297 bool added_by_user) 298 { 299 const struct drm_connector_helper_funcs *funcs; 300 struct drm_encoder *new_encoder; 301 struct drm_crtc_state *crtc_state; 302 303 drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n", 304 connector->base.id, connector->name); 305 306 if (old_connector_state->crtc != new_connector_state->crtc) { 307 if (old_connector_state->crtc) { 308 crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc); 309 crtc_state->connectors_changed = true; 310 } 311 312 if (new_connector_state->crtc) { 313 crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc); 314 crtc_state->connectors_changed = true; 315 } 316 } 317 318 if (!new_connector_state->crtc) { 319 drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n", 320 connector->base.id, connector->name); 321 322 set_best_encoder(state, new_connector_state, NULL); 323 324 return 0; 325 } 326 327 crtc_state = drm_atomic_get_new_crtc_state(state, 328 new_connector_state->crtc); 329 /* 330 * For compatibility with legacy users, we want to make sure that 331 * we allow DPMS On->Off modesets on unregistered connectors. Modesets 332 * which would result in anything else must be considered invalid, to 333 * avoid turning on new displays on dead connectors. 334 * 335 * Since the connector can be unregistered at any point during an 336 * atomic check or commit, this is racy. But that's OK: all we care 337 * about is ensuring that userspace can't do anything but shut off the 338 * display on a connector that was destroyed after it's been notified, 339 * not before. 
340 * 341 * Additionally, we also want to ignore connector registration when 342 * we're trying to restore an atomic state during system resume since 343 * there's a chance the connector may have been destroyed during the 344 * process, but it's better to ignore that then cause 345 * drm_atomic_helper_resume() to fail. 346 * 347 * Last, we want to ignore connector registration when the connector 348 * was not pulled in the atomic state by user-space (ie, was pulled 349 * in by the driver, e.g. when updating a DP-MST stream). 350 */ 351 if (!state->duplicated && drm_connector_is_unregistered(connector) && 352 added_by_user && crtc_state->active) { 353 drm_dbg_atomic(connector->dev, 354 "[CONNECTOR:%d:%s] is not registered\n", 355 connector->base.id, connector->name); 356 return -EINVAL; 357 } 358 359 funcs = connector->helper_private; 360 361 if (funcs->atomic_best_encoder) 362 new_encoder = funcs->atomic_best_encoder(connector, state); 363 else if (funcs->best_encoder) 364 new_encoder = funcs->best_encoder(connector); 365 else 366 new_encoder = drm_connector_get_single_encoder(connector); 367 368 if (!new_encoder) { 369 drm_dbg_atomic(connector->dev, 370 "No suitable encoder found for [CONNECTOR:%d:%s]\n", 371 connector->base.id, connector->name); 372 return -EINVAL; 373 } 374 375 if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) { 376 drm_dbg_atomic(connector->dev, 377 "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n", 378 new_encoder->base.id, 379 new_encoder->name, 380 new_connector_state->crtc->base.id, 381 new_connector_state->crtc->name); 382 return -EINVAL; 383 } 384 385 if (new_encoder == new_connector_state->best_encoder) { 386 set_best_encoder(state, new_connector_state, new_encoder); 387 388 drm_dbg_atomic(connector->dev, 389 "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", 390 connector->base.id, 391 connector->name, 392 new_encoder->base.id, 393 new_encoder->name, 394 new_connector_state->crtc->base.id, 395 new_connector_state->crtc->name); 396 397 return 0; 398 } 399 400 steal_encoder(state, new_encoder); 401 402 set_best_encoder(state, new_connector_state, new_encoder); 403 404 crtc_state->connectors_changed = true; 405 406 drm_dbg_atomic(connector->dev, 407 "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", 408 connector->base.id, 409 connector->name, 410 new_encoder->base.id, 411 new_encoder->name, 412 new_connector_state->crtc->base.id, 413 new_connector_state->crtc->name); 414 415 return 0; 416 } 417 418 static int 419 mode_fixup(struct drm_atomic_state *state) 420 { 421 struct drm_crtc *crtc; 422 struct drm_crtc_state *new_crtc_state; 423 struct drm_connector *connector; 424 struct drm_connector_state *new_conn_state; 425 int i; 426 int ret; 427 428 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 429 if (!new_crtc_state->mode_changed && 430 !new_crtc_state->connectors_changed) 431 continue; 432 433 drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode); 434 } 435 436 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 437 const struct drm_encoder_helper_funcs *funcs; 438 struct drm_encoder *encoder; 439 struct drm_bridge *bridge; 440 441 WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc); 442 443 if (!new_conn_state->crtc || !new_conn_state->best_encoder) 444 continue; 445 446 new_crtc_state = 447 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); 448 449 /* 450 * Each encoder has at most one connector (since we always steal 451 * it away), so we won't call 
->mode_fixup twice. 452 */ 453 encoder = new_conn_state->best_encoder; 454 funcs = encoder->helper_private; 455 456 bridge = drm_bridge_chain_get_first_bridge(encoder); 457 ret = drm_atomic_bridge_chain_check(bridge, 458 new_crtc_state, 459 new_conn_state); 460 drm_bridge_put(bridge); 461 if (ret) { 462 drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n"); 463 return ret; 464 } 465 466 if (funcs && funcs->atomic_check) { 467 ret = funcs->atomic_check(encoder, new_crtc_state, 468 new_conn_state); 469 if (ret) { 470 drm_dbg_atomic(encoder->dev, 471 "[ENCODER:%d:%s] check failed\n", 472 encoder->base.id, encoder->name); 473 return ret; 474 } 475 } else if (funcs && funcs->mode_fixup) { 476 ret = funcs->mode_fixup(encoder, &new_crtc_state->mode, 477 &new_crtc_state->adjusted_mode); 478 if (!ret) { 479 drm_dbg_atomic(encoder->dev, 480 "[ENCODER:%d:%s] fixup failed\n", 481 encoder->base.id, encoder->name); 482 return -EINVAL; 483 } 484 } 485 } 486 487 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 488 const struct drm_crtc_helper_funcs *funcs; 489 490 if (!new_crtc_state->enable) 491 continue; 492 493 if (!new_crtc_state->mode_changed && 494 !new_crtc_state->connectors_changed) 495 continue; 496 497 funcs = crtc->helper_private; 498 if (!funcs || !funcs->mode_fixup) 499 continue; 500 501 ret = funcs->mode_fixup(crtc, &new_crtc_state->mode, 502 &new_crtc_state->adjusted_mode); 503 if (!ret) { 504 drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n", 505 crtc->base.id, crtc->name); 506 return -EINVAL; 507 } 508 } 509 510 return 0; 511 } 512 513 static enum drm_mode_status mode_valid_path(struct drm_connector *connector, 514 struct drm_encoder *encoder, 515 struct drm_crtc *crtc, 516 const struct drm_display_mode *mode) 517 { 518 struct drm_bridge *bridge; 519 enum drm_mode_status ret; 520 521 ret = drm_encoder_mode_valid(encoder, mode); 522 if (ret != MODE_OK) { 523 drm_dbg_atomic(encoder->dev, 524 "[ENCODER:%d:%s] mode_valid() failed\n", 525 encoder->base.id, encoder->name); 526 return ret; 527 } 528 529 bridge = drm_bridge_chain_get_first_bridge(encoder); 530 ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info, 531 mode); 532 drm_bridge_put(bridge); 533 if (ret != MODE_OK) { 534 drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n"); 535 return ret; 536 } 537 538 ret = drm_crtc_mode_valid(crtc, mode); 539 if (ret != MODE_OK) { 540 drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n", 541 crtc->base.id, crtc->name); 542 return ret; 543 } 544 545 return ret; 546 } 547 548 static int 549 mode_valid(struct drm_atomic_state *state) 550 { 551 struct drm_connector_state *conn_state; 552 struct drm_connector *connector; 553 int i; 554 555 for_each_new_connector_in_state(state, connector, conn_state, i) { 556 struct drm_encoder *encoder = conn_state->best_encoder; 557 struct drm_crtc *crtc = conn_state->crtc; 558 struct drm_crtc_state *crtc_state; 559 enum drm_mode_status mode_status; 560 const struct drm_display_mode *mode; 561 562 if (!crtc || !encoder) 563 continue; 564 565 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 566 if (!crtc_state) 567 continue; 568 if (!crtc_state->mode_changed && !crtc_state->connectors_changed) 569 continue; 570 571 mode = &crtc_state->mode; 572 573 mode_status = mode_valid_path(connector, encoder, crtc, mode); 574 if (mode_status != MODE_OK) 575 return -EINVAL; 576 } 577 578 return 0; 579 } 580 581 static int drm_atomic_check_valid_clones(struct drm_atomic_state *state, 582 struct drm_crtc *crtc) 583 { 
584 struct drm_encoder *drm_enc; 585 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 586 crtc); 587 588 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) { 589 if (!drm_enc->possible_clones) { 590 DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id); 591 continue; 592 } 593 594 if ((crtc_state->encoder_mask & drm_enc->possible_clones) != 595 crtc_state->encoder_mask) { 596 DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n", 597 crtc->base.id, crtc_state->encoder_mask); 598 return -EINVAL; 599 } 600 } 601 602 return 0; 603 } 604 605 /** 606 * drm_atomic_helper_check_modeset - validate state object for modeset changes 607 * @dev: DRM device 608 * @state: the driver state object 609 * 610 * Check the state object to see if the requested state is physically possible. 611 * This does all the CRTC and connector related computations for an atomic 612 * update and adds any additional connectors needed for full modesets. It calls 613 * the various per-object callbacks in the follow order: 614 * 615 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder. 616 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state. 617 * 3. If it's determined a modeset is needed then all connectors on the affected 618 * CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them. 619 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and 620 * &drm_crtc_helper_funcs.mode_valid are called on the affected components. 621 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges. 622 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state. 623 * This function is only called when the encoder will be part of a configured CRTC, 624 * it must not be used for implementing connector property validation. 625 * If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called 626 * instead. 627 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints. 628 * 629 * &drm_crtc_state.mode_changed is set when the input mode is changed. 630 * &drm_crtc_state.connectors_changed is set when a connector is added or 631 * removed from the CRTC. &drm_crtc_state.active_changed is set when 632 * &drm_crtc_state.active changes, which is used for DPMS. 633 * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank(). 634 * See also: drm_atomic_crtc_needs_modeset() 635 * 636 * IMPORTANT: 637 * 638 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their 639 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done 640 * without a full modeset) _must_ call this function after that change. It is 641 * permitted to call this function multiple times for the same update, e.g. 642 * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the 643 * adjusted dotclock for fifo space allocation and watermark computation. 
644 * 645 * RETURNS: 646 * Zero for success or -errno 647 */ 648 int 649 drm_atomic_helper_check_modeset(struct drm_device *dev, 650 struct drm_atomic_state *state) 651 { 652 struct drm_crtc *crtc; 653 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 654 struct drm_connector *connector; 655 struct drm_connector_state *old_connector_state, *new_connector_state; 656 int i, ret; 657 unsigned int connectors_mask = 0, user_connectors_mask = 0; 658 659 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) 660 user_connectors_mask |= BIT(i); 661 662 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 663 bool has_connectors = 664 !!new_crtc_state->connector_mask; 665 666 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 667 668 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { 669 drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n", 670 crtc->base.id, crtc->name); 671 new_crtc_state->mode_changed = true; 672 } 673 674 if (old_crtc_state->enable != new_crtc_state->enable) { 675 drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n", 676 crtc->base.id, crtc->name); 677 678 /* 679 * For clarity this assignment is done here, but 680 * enable == 0 is only true when there are no 681 * connectors and a NULL mode. 682 * 683 * The other way around is true as well. enable != 0 684 * implies that connectors are attached and a mode is set. 685 */ 686 new_crtc_state->mode_changed = true; 687 new_crtc_state->connectors_changed = true; 688 } 689 690 if (old_crtc_state->active != new_crtc_state->active) { 691 drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n", 692 crtc->base.id, crtc->name); 693 new_crtc_state->active_changed = true; 694 } 695 696 if (new_crtc_state->enable != has_connectors) { 697 drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch (%d/%d)\n", 698 crtc->base.id, crtc->name, 699 new_crtc_state->enable, has_connectors); 700 701 return -EINVAL; 702 } 703 704 if (drm_dev_has_vblank(dev)) 705 new_crtc_state->no_vblank = false; 706 else 707 new_crtc_state->no_vblank = true; 708 } 709 710 ret = handle_conflicting_encoders(state, false); 711 if (ret) 712 return ret; 713 714 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { 715 const struct drm_connector_helper_funcs *funcs = connector->helper_private; 716 717 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 718 719 /* 720 * This only sets crtc->connectors_changed for routing changes, 721 * drivers must set crtc->connectors_changed themselves when 722 * connector properties need to be updated. 
723 */ 724 ret = update_connector_routing(state, connector, 725 old_connector_state, 726 new_connector_state, 727 BIT(i) & user_connectors_mask); 728 if (ret) 729 return ret; 730 if (old_connector_state->crtc) { 731 new_crtc_state = drm_atomic_get_new_crtc_state(state, 732 old_connector_state->crtc); 733 if (old_connector_state->link_status != 734 new_connector_state->link_status) 735 new_crtc_state->connectors_changed = true; 736 737 if (old_connector_state->max_requested_bpc != 738 new_connector_state->max_requested_bpc) 739 new_crtc_state->connectors_changed = true; 740 } 741 742 if (funcs->atomic_check) 743 ret = funcs->atomic_check(connector, state); 744 if (ret) { 745 drm_dbg_atomic(dev, 746 "[CONNECTOR:%d:%s] driver check failed\n", 747 connector->base.id, connector->name); 748 return ret; 749 } 750 751 connectors_mask |= BIT(i); 752 } 753 754 /* 755 * After all the routing has been prepared we need to add in any 756 * connector which is itself unchanged, but whose CRTC changes its 757 * configuration. This must be done before calling mode_fixup in case a 758 * crtc only changed its mode but has the same set of connectors. 759 */ 760 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 761 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 762 continue; 763 764 drm_dbg_atomic(dev, 765 "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n", 766 crtc->base.id, crtc->name, 767 new_crtc_state->enable ? 'y' : 'n', 768 new_crtc_state->active ? 'y' : 'n'); 769 770 ret = drm_atomic_add_affected_connectors(state, crtc); 771 if (ret != 0) 772 return ret; 773 774 ret = drm_atomic_add_affected_planes(state, crtc); 775 if (ret != 0) 776 return ret; 777 778 ret = drm_atomic_check_valid_clones(state, crtc); 779 if (ret != 0) 780 return ret; 781 } 782 783 /* 784 * Iterate over all connectors again, to make sure atomic_check() 785 * has been called on them when a modeset is forced. 786 */ 787 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { 788 const struct drm_connector_helper_funcs *funcs = connector->helper_private; 789 790 if (connectors_mask & BIT(i)) 791 continue; 792 793 if (funcs->atomic_check) 794 ret = funcs->atomic_check(connector, state); 795 if (ret) { 796 drm_dbg_atomic(dev, 797 "[CONNECTOR:%d:%s] driver check failed\n", 798 connector->base.id, connector->name); 799 return ret; 800 } 801 } 802 803 /* 804 * Iterate over all connectors again, and add all affected bridges to 805 * the state. 806 */ 807 for_each_oldnew_connector_in_state(state, connector, 808 old_connector_state, 809 new_connector_state, i) { 810 struct drm_encoder *encoder; 811 812 encoder = old_connector_state->best_encoder; 813 ret = drm_atomic_add_encoder_bridges(state, encoder); 814 if (ret) 815 return ret; 816 817 encoder = new_connector_state->best_encoder; 818 ret = drm_atomic_add_encoder_bridges(state, encoder); 819 if (ret) 820 return ret; 821 } 822 823 ret = mode_valid(state); 824 if (ret) 825 return ret; 826 827 return mode_fixup(state); 828 } 829 EXPORT_SYMBOL(drm_atomic_helper_check_modeset); 830 831 /** 832 * drm_atomic_helper_check_wb_connector_state() - Check writeback connector state 833 * @connector: corresponding connector 834 * @state: the driver state object 835 * 836 * Checks if the writeback connector state is valid, and returns an error if it 837 * isn't. 
838 * 839 * RETURNS: 840 * Zero for success or -errno 841 */ 842 int 843 drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector, 844 struct drm_atomic_state *state) 845 { 846 struct drm_connector_state *conn_state = 847 drm_atomic_get_new_connector_state(state, connector); 848 struct drm_writeback_job *wb_job = conn_state->writeback_job; 849 struct drm_property_blob *pixel_format_blob; 850 struct drm_framebuffer *fb; 851 size_t i, nformats; 852 u32 *formats; 853 854 if (!wb_job || !wb_job->fb) 855 return 0; 856 857 pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr; 858 nformats = pixel_format_blob->length / sizeof(u32); 859 formats = pixel_format_blob->data; 860 fb = wb_job->fb; 861 862 for (i = 0; i < nformats; i++) 863 if (fb->format->format == formats[i]) 864 return 0; 865 866 drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format); 867 868 return -EINVAL; 869 } 870 EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state); 871 872 /** 873 * drm_atomic_helper_check_plane_state() - Check plane state for validity 874 * @plane_state: plane state to check 875 * @crtc_state: CRTC state to check 876 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point 877 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point 878 * @can_position: is it legal to position the plane such that it 879 * doesn't cover the entire CRTC? This will generally 880 * only be false for primary planes. 881 * @can_update_disabled: can the plane be updated while the CRTC 882 * is disabled? 883 * 884 * Checks that a desired plane update is valid, and updates various 885 * bits of derived state (clipped coordinates etc.). Drivers that provide 886 * their own plane handling rather than helper-provided implementations may 887 * still wish to call this function to avoid duplication of error checking 888 * code. 
889 * 890 * RETURNS: 891 * Zero if update appears valid, error code on failure 892 */ 893 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, 894 const struct drm_crtc_state *crtc_state, 895 int min_scale, 896 int max_scale, 897 bool can_position, 898 bool can_update_disabled) 899 { 900 struct drm_framebuffer *fb = plane_state->fb; 901 struct drm_rect *src = &plane_state->src; 902 struct drm_rect *dst = &plane_state->dst; 903 unsigned int rotation = plane_state->rotation; 904 struct drm_rect clip = {}; 905 int hscale, vscale; 906 907 WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc); 908 909 *src = drm_plane_state_src(plane_state); 910 *dst = drm_plane_state_dest(plane_state); 911 912 if (!fb) { 913 plane_state->visible = false; 914 return 0; 915 } 916 917 /* crtc should only be NULL when disabling (i.e., !fb) */ 918 if (WARN_ON(!plane_state->crtc)) { 919 plane_state->visible = false; 920 return 0; 921 } 922 923 if (!crtc_state->enable && !can_update_disabled) { 924 drm_dbg_kms(plane_state->plane->dev, 925 "Cannot update plane of a disabled CRTC.\n"); 926 return -EINVAL; 927 } 928 929 drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); 930 931 /* Check scaling */ 932 hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); 933 vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); 934 if (hscale < 0 || vscale < 0) { 935 drm_dbg_kms(plane_state->plane->dev, 936 "Invalid scaling of plane\n"); 937 drm_rect_debug_print("src: ", &plane_state->src, true); 938 drm_rect_debug_print("dst: ", &plane_state->dst, false); 939 return -ERANGE; 940 } 941 942 if (crtc_state->enable) 943 drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2); 944 945 plane_state->visible = drm_rect_clip_scaled(src, dst, &clip); 946 947 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); 948 949 if (!plane_state->visible) 950 /* 951 * Plane isn't visible; some drivers can handle this 952 * so we just return success here. Drivers that can't 953 * (including those that use the primary plane helper's 954 * update function) will return an error from their 955 * update_plane handler. 956 */ 957 return 0; 958 959 if (!can_position && !drm_rect_equals(dst, &clip)) { 960 drm_dbg_kms(plane_state->plane->dev, 961 "Plane must cover entire CRTC\n"); 962 drm_rect_debug_print("dst: ", dst, false); 963 drm_rect_debug_print("clip: ", &clip, false); 964 return -EINVAL; 965 } 966 967 return 0; 968 } 969 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state); 970 971 /** 972 * drm_atomic_helper_check_crtc_primary_plane() - Check CRTC state for primary plane 973 * @crtc_state: CRTC state to check 974 * 975 * Checks that a CRTC has at least one primary plane attached to it, which is 976 * a requirement on some hardware. Note that this only involves the CRTC side 977 * of the test. To test if the primary plane is visible or if it can be updated 978 * without the CRTC being enabled, use drm_atomic_helper_check_plane_state() in 979 * the plane's atomic check. 
980 * 981 * RETURNS: 982 * 0 if a primary plane is attached to the CRTC, or an error code otherwise 983 */ 984 int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state) 985 { 986 struct drm_crtc *crtc = crtc_state->crtc; 987 struct drm_device *dev = crtc->dev; 988 struct drm_plane *plane; 989 990 /* needs at least one primary plane to be enabled */ 991 drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) { 992 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 993 return 0; 994 } 995 996 drm_dbg_atomic(dev, "[CRTC:%d:%s] primary plane missing\n", crtc->base.id, crtc->name); 997 998 return -EINVAL; 999 } 1000 EXPORT_SYMBOL(drm_atomic_helper_check_crtc_primary_plane); 1001 1002 /** 1003 * drm_atomic_helper_check_planes - validate state object for planes changes 1004 * @dev: DRM device 1005 * @state: the driver state object 1006 * 1007 * Check the state object to see if the requested state is physically possible. 1008 * This does all the plane update related checks using by calling into the 1009 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check 1010 * hooks provided by the driver. 1011 * 1012 * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has 1013 * updated planes. 1014 * 1015 * RETURNS: 1016 * Zero for success or -errno 1017 */ 1018 int 1019 drm_atomic_helper_check_planes(struct drm_device *dev, 1020 struct drm_atomic_state *state) 1021 { 1022 struct drm_crtc *crtc; 1023 struct drm_crtc_state *new_crtc_state; 1024 struct drm_plane *plane; 1025 struct drm_plane_state *new_plane_state, *old_plane_state; 1026 int i, ret = 0; 1027 1028 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 1029 const struct drm_plane_helper_funcs *funcs; 1030 1031 WARN_ON(!drm_modeset_is_locked(&plane->mutex)); 1032 1033 funcs = plane->helper_private; 1034 1035 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane); 1036 1037 drm_atomic_helper_check_plane_damage(state, new_plane_state); 1038 1039 if (!funcs || !funcs->atomic_check) 1040 continue; 1041 1042 ret = funcs->atomic_check(plane, state); 1043 if (ret) { 1044 drm_dbg_atomic(plane->dev, 1045 "[PLANE:%d:%s] atomic driver check failed\n", 1046 plane->base.id, plane->name); 1047 return ret; 1048 } 1049 } 1050 1051 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 1052 const struct drm_crtc_helper_funcs *funcs; 1053 1054 funcs = crtc->helper_private; 1055 1056 if (!funcs || !funcs->atomic_check) 1057 continue; 1058 1059 ret = funcs->atomic_check(crtc, state); 1060 if (ret) { 1061 drm_dbg_atomic(crtc->dev, 1062 "[CRTC:%d:%s] atomic driver check failed\n", 1063 crtc->base.id, crtc->name); 1064 return ret; 1065 } 1066 } 1067 1068 return ret; 1069 } 1070 EXPORT_SYMBOL(drm_atomic_helper_check_planes); 1071 1072 /** 1073 * drm_atomic_helper_check - validate state object 1074 * @dev: DRM device 1075 * @state: the driver state object 1076 * 1077 * Check the state object to see if the requested state is physically possible. 1078 * Only CRTCs and planes have check callbacks, so for any additional (global) 1079 * checking that a driver needs it can simply wrap that around this function. 1080 * Drivers without such needs can directly use this as their 1081 * &drm_mode_config_funcs.atomic_check callback. 1082 * 1083 * This just wraps the two parts of the state checking for planes and modeset 1084 * state in the default order: First it calls drm_atomic_helper_check_modeset() 1085 * and then drm_atomic_helper_check_planes(). 
The assumption is that the 1086 * @drm_plane_helper_funcs.atomic_check and @drm_crtc_helper_funcs.atomic_check 1087 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute 1088 * watermarks. 1089 * 1090 * Note that zpos normalization will add all enable planes to the state which 1091 * might not desired for some drivers. 1092 * For example enable/disable of a cursor plane which have fixed zpos value 1093 * would trigger all other enabled planes to be forced to the state change. 1094 * 1095 * IMPORTANT: 1096 * 1097 * As this function calls drm_atomic_helper_check_modeset() internally, its 1098 * restrictions also apply: 1099 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their 1100 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done 1101 * without a full modeset) _must_ call drm_atomic_helper_check_modeset() 1102 * function again after that change. 1103 * 1104 * RETURNS: 1105 * Zero for success or -errno 1106 */ 1107 int drm_atomic_helper_check(struct drm_device *dev, 1108 struct drm_atomic_state *state) 1109 { 1110 int ret; 1111 1112 ret = drm_atomic_helper_check_modeset(dev, state); 1113 if (ret) 1114 return ret; 1115 1116 if (dev->mode_config.normalize_zpos) { 1117 ret = drm_atomic_normalize_zpos(dev, state); 1118 if (ret) 1119 return ret; 1120 } 1121 1122 ret = drm_atomic_helper_check_planes(dev, state); 1123 if (ret) 1124 return ret; 1125 1126 if (state->legacy_cursor_update) 1127 state->async_update = !drm_atomic_helper_async_check(dev, state); 1128 1129 drm_self_refresh_helper_alter_state(state); 1130 1131 return ret; 1132 } 1133 EXPORT_SYMBOL(drm_atomic_helper_check); 1134 1135 static bool 1136 crtc_needs_disable(struct drm_crtc_state *old_state, 1137 struct drm_crtc_state *new_state) 1138 { 1139 /* 1140 * No new_state means the CRTC is off, so the only criteria is whether 1141 * it's currently active or in self refresh mode. 1142 */ 1143 if (!new_state) 1144 return drm_atomic_crtc_effectively_active(old_state); 1145 1146 /* 1147 * We need to disable bridge(s) and CRTC if we're transitioning out of 1148 * self-refresh and changing CRTCs at the same time, because the 1149 * bridge tracks self-refresh status via CRTC state. 1150 */ 1151 if (old_state->self_refresh_active && 1152 old_state->crtc != new_state->crtc) 1153 return true; 1154 1155 /* 1156 * We also need to run through the crtc_funcs->disable() function if 1157 * the CRTC is currently on, if it's transitioning to self refresh 1158 * mode, or if it's in self refresh mode and needs to be fully 1159 * disabled. 1160 */ 1161 return old_state->active || 1162 (old_state->self_refresh_active && !new_state->active) || 1163 new_state->self_refresh_active; 1164 } 1165 1166 static void 1167 encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state) 1168 { 1169 struct drm_connector *connector; 1170 struct drm_connector_state *old_conn_state, *new_conn_state; 1171 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 1172 int i; 1173 1174 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) { 1175 const struct drm_encoder_helper_funcs *funcs; 1176 struct drm_encoder *encoder; 1177 struct drm_bridge *bridge; 1178 1179 /* 1180 * Shut down everything that's in the changeset and currently 1181 * still on. So need to check the old, saved state. 
1182 */ 1183 if (!old_conn_state->crtc) 1184 continue; 1185 1186 old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc); 1187 1188 if (new_conn_state->crtc) 1189 new_crtc_state = drm_atomic_get_new_crtc_state( 1190 state, 1191 new_conn_state->crtc); 1192 else 1193 new_crtc_state = NULL; 1194 1195 if (!crtc_needs_disable(old_crtc_state, new_crtc_state) || 1196 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state)) 1197 continue; 1198 1199 encoder = old_conn_state->best_encoder; 1200 1201 /* We shouldn't get this far if we didn't previously have 1202 * an encoder.. but WARN_ON() rather than explode. 1203 */ 1204 if (WARN_ON(!encoder)) 1205 continue; 1206 1207 funcs = encoder->helper_private; 1208 1209 drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n", 1210 encoder->base.id, encoder->name); 1211 1212 /* 1213 * Each encoder has at most one connector (since we always steal 1214 * it away), so we won't call disable hooks twice. 1215 */ 1216 bridge = drm_bridge_chain_get_first_bridge(encoder); 1217 drm_atomic_bridge_chain_disable(bridge, state); 1218 drm_bridge_put(bridge); 1219 1220 /* Right function depends upon target state. */ 1221 if (funcs) { 1222 if (funcs->atomic_disable) 1223 funcs->atomic_disable(encoder, state); 1224 else if (new_conn_state->crtc && funcs->prepare) 1225 funcs->prepare(encoder); 1226 else if (funcs->disable) 1227 funcs->disable(encoder); 1228 else if (funcs->dpms) 1229 funcs->dpms(encoder, DRM_MODE_DPMS_OFF); 1230 } 1231 } 1232 } 1233 1234 static void 1235 crtc_disable(struct drm_device *dev, struct drm_atomic_state *state) 1236 { 1237 struct drm_crtc *crtc; 1238 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 1239 int i; 1240 1241 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1242 const struct drm_crtc_helper_funcs *funcs; 1243 int ret; 1244 1245 /* Shut down everything that needs a full modeset. */ 1246 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 1247 continue; 1248 1249 if (!crtc_needs_disable(old_crtc_state, new_crtc_state)) 1250 continue; 1251 1252 funcs = crtc->helper_private; 1253 1254 drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n", 1255 crtc->base.id, crtc->name); 1256 1257 1258 /* Right function depends upon target state. */ 1259 if (new_crtc_state->enable && funcs->prepare) 1260 funcs->prepare(crtc); 1261 else if (funcs->atomic_disable) 1262 funcs->atomic_disable(crtc, state); 1263 else if (funcs->disable) 1264 funcs->disable(crtc); 1265 else if (funcs->dpms) 1266 funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 1267 1268 if (!drm_dev_has_vblank(dev)) 1269 continue; 1270 1271 ret = drm_crtc_vblank_get(crtc); 1272 /* 1273 * Self-refresh is not a true "disable"; ensure vblank remains 1274 * enabled. 
1275 */ 1276 if (new_crtc_state->self_refresh_active) 1277 WARN_ONCE(ret != 0, 1278 "driver disabled vblank in self-refresh\n"); 1279 else 1280 WARN_ONCE(ret != -EINVAL, 1281 "driver forgot to call drm_crtc_vblank_off()\n"); 1282 if (ret == 0) 1283 drm_crtc_vblank_put(crtc); 1284 } 1285 } 1286 1287 static void 1288 encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state) 1289 { 1290 struct drm_connector *connector; 1291 struct drm_connector_state *old_conn_state, *new_conn_state; 1292 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 1293 int i; 1294 1295 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) { 1296 struct drm_encoder *encoder; 1297 struct drm_bridge *bridge; 1298 1299 /* 1300 * Shut down everything that's in the changeset and currently 1301 * still on. So need to check the old, saved state. 1302 */ 1303 if (!old_conn_state->crtc) 1304 continue; 1305 1306 old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc); 1307 1308 if (new_conn_state->crtc) 1309 new_crtc_state = drm_atomic_get_new_crtc_state(state, 1310 new_conn_state->crtc); 1311 else 1312 new_crtc_state = NULL; 1313 1314 if (!crtc_needs_disable(old_crtc_state, new_crtc_state) || 1315 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state)) 1316 continue; 1317 1318 encoder = old_conn_state->best_encoder; 1319 1320 /* 1321 * We shouldn't get this far if we didn't previously have 1322 * an encoder.. but WARN_ON() rather than explode. 1323 */ 1324 if (WARN_ON(!encoder)) 1325 continue; 1326 1327 drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n", 1328 encoder->base.id, encoder->name); 1329 1330 /* 1331 * Each encoder has at most one connector (since we always steal 1332 * it away), so we won't call disable hooks twice. 1333 */ 1334 bridge = drm_bridge_chain_get_first_bridge(encoder); 1335 drm_atomic_bridge_chain_post_disable(bridge, state); 1336 drm_bridge_put(bridge); 1337 } 1338 } 1339 1340 static void 1341 disable_outputs(struct drm_device *dev, struct drm_atomic_state *state) 1342 { 1343 encoder_bridge_disable(dev, state); 1344 1345 crtc_disable(dev, state); 1346 1347 encoder_bridge_post_disable(dev, state); 1348 } 1349 1350 /** 1351 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state 1352 * @dev: DRM device 1353 * @state: atomic state object being committed 1354 * 1355 * This function updates all the various legacy modeset state pointers in 1356 * connectors, encoders and CRTCs. 1357 * 1358 * Drivers can use this for building their own atomic commit if they don't have 1359 * a pure helper-based modeset implementation. 1360 * 1361 * Since these updates are not synchronized with lockings, only code paths 1362 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the 1363 * legacy state filled out by this helper. Defacto this means this helper and 1364 * the legacy state pointers are only really useful for transitioning an 1365 * existing driver to the atomic world. 
1366 */ 1367 void 1368 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, 1369 struct drm_atomic_state *state) 1370 { 1371 struct drm_connector *connector; 1372 struct drm_connector_state *old_conn_state, *new_conn_state; 1373 struct drm_crtc *crtc; 1374 struct drm_crtc_state *new_crtc_state; 1375 int i; 1376 1377 /* clear out existing links and update dpms */ 1378 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) { 1379 if (connector->encoder) { 1380 WARN_ON(!connector->encoder->crtc); 1381 1382 connector->encoder->crtc = NULL; 1383 connector->encoder = NULL; 1384 } 1385 1386 crtc = new_conn_state->crtc; 1387 if ((!crtc && old_conn_state->crtc) || 1388 (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) { 1389 int mode = DRM_MODE_DPMS_OFF; 1390 1391 if (crtc && crtc->state->active) 1392 mode = DRM_MODE_DPMS_ON; 1393 1394 connector->dpms = mode; 1395 } 1396 } 1397 1398 /* set new links */ 1399 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 1400 if (!new_conn_state->crtc) 1401 continue; 1402 1403 if (WARN_ON(!new_conn_state->best_encoder)) 1404 continue; 1405 1406 connector->encoder = new_conn_state->best_encoder; 1407 connector->encoder->crtc = new_conn_state->crtc; 1408 } 1409 1410 /* set legacy state in the crtc structure */ 1411 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 1412 struct drm_plane *primary = crtc->primary; 1413 struct drm_plane_state *new_plane_state; 1414 1415 crtc->mode = new_crtc_state->mode; 1416 crtc->enabled = new_crtc_state->enable; 1417 1418 new_plane_state = 1419 drm_atomic_get_new_plane_state(state, primary); 1420 1421 if (new_plane_state && new_plane_state->crtc == crtc) { 1422 crtc->x = new_plane_state->src_x >> 16; 1423 crtc->y = new_plane_state->src_y >> 16; 1424 } 1425 } 1426 } 1427 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state); 1428 1429 /** 1430 * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants 1431 * @state: atomic state object 1432 * 1433 * Updates the timestamping constants used for precise vblank timestamps 1434 * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state. 
1435 */ 1436 void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state) 1437 { 1438 struct drm_crtc_state *new_crtc_state; 1439 struct drm_crtc *crtc; 1440 int i; 1441 1442 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 1443 if (new_crtc_state->enable) 1444 drm_calc_timestamping_constants(crtc, 1445 &new_crtc_state->adjusted_mode); 1446 } 1447 } 1448 EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants); 1449 1450 static void 1451 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state) 1452 { 1453 struct drm_crtc *crtc; 1454 struct drm_crtc_state *new_crtc_state; 1455 struct drm_connector *connector; 1456 struct drm_connector_state *new_conn_state; 1457 int i; 1458 1459 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 1460 const struct drm_crtc_helper_funcs *funcs; 1461 1462 if (!new_crtc_state->mode_changed) 1463 continue; 1464 1465 funcs = crtc->helper_private; 1466 1467 if (new_crtc_state->enable && funcs->mode_set_nofb) { 1468 drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n", 1469 crtc->base.id, crtc->name); 1470 1471 funcs->mode_set_nofb(crtc); 1472 } 1473 } 1474 1475 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 1476 const struct drm_encoder_helper_funcs *funcs; 1477 struct drm_encoder *encoder; 1478 struct drm_display_mode *mode, *adjusted_mode; 1479 struct drm_bridge *bridge; 1480 1481 if (!new_conn_state->best_encoder) 1482 continue; 1483 1484 encoder = new_conn_state->best_encoder; 1485 funcs = encoder->helper_private; 1486 new_crtc_state = new_conn_state->crtc->state; 1487 mode = &new_crtc_state->mode; 1488 adjusted_mode = &new_crtc_state->adjusted_mode; 1489 1490 if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed) 1491 continue; 1492 1493 drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n", 1494 encoder->base.id, encoder->name); 1495 1496 /* 1497 * Each encoder has at most one connector (since we always steal 1498 * it away), so we won't call mode_set hooks twice. 1499 */ 1500 if (funcs && funcs->atomic_mode_set) { 1501 funcs->atomic_mode_set(encoder, new_crtc_state, 1502 new_conn_state); 1503 } else if (funcs && funcs->mode_set) { 1504 funcs->mode_set(encoder, mode, adjusted_mode); 1505 } 1506 1507 bridge = drm_bridge_chain_get_first_bridge(encoder); 1508 drm_bridge_chain_mode_set(bridge, mode, adjusted_mode); 1509 drm_bridge_put(bridge); 1510 } 1511 } 1512 1513 /** 1514 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs 1515 * @dev: DRM device 1516 * @state: atomic state object being committed 1517 * 1518 * This function shuts down all the outputs that need to be shut down and 1519 * prepares them (if required) with the new mode. 1520 * 1521 * For compatibility with legacy CRTC helpers this should be called before 1522 * drm_atomic_helper_commit_planes(), which is what the default commit function 1523 * does. But drivers with different needs can group the modeset commits together 1524 * and do the plane commits at the end. This is useful for drivers doing runtime 1525 * PM since planes updates then only happen when the CRTC is actually enabled. 
1526 */ 1527 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, 1528 struct drm_atomic_state *state) 1529 { 1530 disable_outputs(dev, state); 1531 1532 drm_atomic_helper_update_legacy_modeset_state(dev, state); 1533 drm_atomic_helper_calc_timestamping_constants(state); 1534 1535 crtc_set_mode(dev, state); 1536 } 1537 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables); 1538 1539 static void drm_atomic_helper_commit_writebacks(struct drm_device *dev, 1540 struct drm_atomic_state *state) 1541 { 1542 struct drm_connector *connector; 1543 struct drm_connector_state *new_conn_state; 1544 int i; 1545 1546 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 1547 const struct drm_connector_helper_funcs *funcs; 1548 1549 funcs = connector->helper_private; 1550 if (!funcs->atomic_commit) 1551 continue; 1552 1553 if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) { 1554 WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); 1555 funcs->atomic_commit(connector, state); 1556 } 1557 } 1558 } 1559 1560 static void 1561 encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state) 1562 { 1563 struct drm_connector *connector; 1564 struct drm_connector_state *new_conn_state; 1565 int i; 1566 1567 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 1568 struct drm_encoder *encoder; 1569 struct drm_bridge *bridge; 1570 1571 if (!new_conn_state->best_encoder) 1572 continue; 1573 1574 if (!new_conn_state->crtc->state->active || 1575 !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state)) 1576 continue; 1577 1578 encoder = new_conn_state->best_encoder; 1579 1580 drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n", 1581 encoder->base.id, encoder->name); 1582 1583 /* 1584 * Each encoder has at most one connector (since we always steal 1585 * it away), so we won't call enable hooks twice. 1586 */ 1587 bridge = drm_bridge_chain_get_first_bridge(encoder); 1588 drm_atomic_bridge_chain_pre_enable(bridge, state); 1589 drm_bridge_put(bridge); 1590 } 1591 } 1592 1593 static void 1594 crtc_enable(struct drm_device *dev, struct drm_atomic_state *state) 1595 { 1596 struct drm_crtc *crtc; 1597 struct drm_crtc_state *old_crtc_state; 1598 struct drm_crtc_state *new_crtc_state; 1599 int i; 1600 1601 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1602 const struct drm_crtc_helper_funcs *funcs; 1603 1604 /* Need to filter out CRTCs where only planes change. 
*/ 1605 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 1606 continue; 1607 1608 if (!new_crtc_state->active) 1609 continue; 1610 1611 funcs = crtc->helper_private; 1612 1613 if (new_crtc_state->enable) { 1614 drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n", 1615 crtc->base.id, crtc->name); 1616 if (funcs->atomic_enable) 1617 funcs->atomic_enable(crtc, state); 1618 else if (funcs->commit) 1619 funcs->commit(crtc); 1620 } 1621 } 1622 } 1623 1624 static void 1625 encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state) 1626 { 1627 struct drm_connector *connector; 1628 struct drm_connector_state *new_conn_state; 1629 int i; 1630 1631 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 1632 const struct drm_encoder_helper_funcs *funcs; 1633 struct drm_encoder *encoder; 1634 struct drm_bridge *bridge; 1635 1636 if (!new_conn_state->best_encoder) 1637 continue; 1638 1639 if (!new_conn_state->crtc->state->active || 1640 !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state)) 1641 continue; 1642 1643 encoder = new_conn_state->best_encoder; 1644 funcs = encoder->helper_private; 1645 1646 drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n", 1647 encoder->base.id, encoder->name); 1648 1649 /* 1650 * Each encoder has at most one connector (since we always steal 1651 * it away), so we won't call enable hooks twice. 1652 */ 1653 bridge = drm_bridge_chain_get_first_bridge(encoder); 1654 1655 if (funcs) { 1656 if (funcs->atomic_enable) 1657 funcs->atomic_enable(encoder, state); 1658 else if (funcs->enable) 1659 funcs->enable(encoder); 1660 else if (funcs->commit) 1661 funcs->commit(encoder); 1662 } 1663 1664 drm_atomic_bridge_chain_enable(bridge, state); 1665 drm_bridge_put(bridge); 1666 } 1667 } 1668 1669 /** 1670 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs 1671 * @dev: DRM device 1672 * @state: atomic state object being committed 1673 * 1674 * This function enables all the outputs with the new configuration which had to 1675 * be turned off for the update. 1676 * 1677 * For compatibility with legacy CRTC helpers this should be called after 1678 * drm_atomic_helper_commit_planes(), which is what the default commit function 1679 * does. But drivers with different needs can group the modeset commits together 1680 * and do the plane commits at the end. This is useful for drivers doing runtime 1681 * PM since planes updates then only happen when the CRTC is actually enabled. 1682 */ 1683 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, 1684 struct drm_atomic_state *state) 1685 { 1686 encoder_bridge_pre_enable(dev, state); 1687 1688 crtc_enable(dev, state); 1689 1690 encoder_bridge_enable(dev, state); 1691 1692 drm_atomic_helper_commit_writebacks(dev, state); 1693 } 1694 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); 1695 1696 /* 1697 * For atomic updates which touch just a single CRTC, calculate the time of the 1698 * next vblank, and inform all the fences of the deadline. 
1699 */ 1700 static void set_fence_deadline(struct drm_device *dev, 1701 struct drm_atomic_state *state) 1702 { 1703 struct drm_crtc *crtc; 1704 struct drm_crtc_state *new_crtc_state; 1705 struct drm_plane *plane; 1706 struct drm_plane_state *new_plane_state; 1707 ktime_t vbltime = 0; 1708 int i; 1709 1710 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { 1711 ktime_t v; 1712 1713 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) 1714 continue; 1715 1716 if (!new_crtc_state->active) 1717 continue; 1718 1719 if (drm_crtc_next_vblank_start(crtc, &v)) 1720 continue; 1721 1722 if (!vbltime || ktime_before(v, vbltime)) 1723 vbltime = v; 1724 } 1725 1726 /* If no CRTCs updated, then nothing to do: */ 1727 if (!vbltime) 1728 return; 1729 1730 for_each_new_plane_in_state (state, plane, new_plane_state, i) { 1731 if (!new_plane_state->fence) 1732 continue; 1733 dma_fence_set_deadline(new_plane_state->fence, vbltime); 1734 } 1735 } 1736 1737 /** 1738 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state 1739 * @dev: DRM device 1740 * @state: atomic state object with old state structures 1741 * @pre_swap: If true, do an interruptible wait, and @state is the new state. 1742 * Otherwise @state is the old state. 1743 * 1744 * For implicit sync, driver should fish the exclusive fence out from the 1745 * incoming fb's and stash it in the drm_plane_state. This is called after 1746 * drm_atomic_helper_swap_state() so it uses the current plane state (and 1747 * just uses the atomic state to find the changed planes) 1748 * 1749 * Note that @pre_swap is needed since the point where we block for fences moves 1750 * around depending upon whether an atomic commit is blocking or 1751 * non-blocking. For non-blocking commit all waiting needs to happen after 1752 * drm_atomic_helper_swap_state() is called, but for blocking commits we want 1753 * to wait **before** we do anything that can't be easily rolled back. That is 1754 * before we call drm_atomic_helper_swap_state(). 1755 * 1756 * Returns zero if success or < 0 if dma_fence_wait() fails. 1757 */ 1758 int drm_atomic_helper_wait_for_fences(struct drm_device *dev, 1759 struct drm_atomic_state *state, 1760 bool pre_swap) 1761 { 1762 struct drm_plane *plane; 1763 struct drm_plane_state *new_plane_state; 1764 int i, ret; 1765 1766 set_fence_deadline(dev, state); 1767 1768 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 1769 if (!new_plane_state->fence) 1770 continue; 1771 1772 WARN_ON(!new_plane_state->fb); 1773 1774 /* 1775 * If waiting for fences pre-swap (ie: nonblock), userspace can 1776 * still interrupt the operation. Instead of blocking until the 1777 * timer expires, make the wait interruptible. 1778 */ 1779 ret = dma_fence_wait(new_plane_state->fence, pre_swap); 1780 if (ret) 1781 return ret; 1782 1783 dma_fence_put(new_plane_state->fence); 1784 new_plane_state->fence = NULL; 1785 } 1786 1787 return 0; 1788 } 1789 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences); 1790 1791 /** 1792 * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs 1793 * @dev: DRM device 1794 * @state: atomic state object being committed 1795 * 1796 * Helper to, after atomic commit, wait for vblanks on all affected 1797 * CRTCs (ie. before cleaning up old framebuffers using 1798 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the 1799 * framebuffers have actually changed to optimize for the legacy cursor and 1800 * plane update use-case. 
1801 * 1802 * Drivers using the nonblocking commit tracking support initialized by calling 1803 * drm_atomic_helper_setup_commit() should look at 1804 * drm_atomic_helper_wait_for_flip_done() as an alternative. 1805 */ 1806 void 1807 drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, 1808 struct drm_atomic_state *state) 1809 { 1810 struct drm_crtc *crtc; 1811 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 1812 int i, ret; 1813 unsigned int crtc_mask = 0; 1814 1815 /* 1816 * Legacy cursor ioctls are completely unsynced, and userspace 1817 * relies on that (by doing tons of cursor updates). 1818 */ 1819 if (state->legacy_cursor_update) 1820 return; 1821 1822 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1823 if (!new_crtc_state->active) 1824 continue; 1825 1826 ret = drm_crtc_vblank_get(crtc); 1827 if (ret != 0) 1828 continue; 1829 1830 crtc_mask |= drm_crtc_mask(crtc); 1831 state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc); 1832 } 1833 1834 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { 1835 wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc); 1836 1837 if (!(crtc_mask & drm_crtc_mask(crtc))) 1838 continue; 1839 1840 ret = wait_event_timeout(*queue, 1841 state->crtcs[i].last_vblank_count != 1842 drm_crtc_vblank_count(crtc), 1843 msecs_to_jiffies(100)); 1844 1845 WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n", 1846 crtc->base.id, crtc->name); 1847 1848 drm_crtc_vblank_put(crtc); 1849 } 1850 } 1851 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); 1852 1853 /** 1854 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done 1855 * @dev: DRM device 1856 * @state: atomic state object being committed 1857 * 1858 * Helper to, after atomic commit, wait for page flips on all affected 1859 * crtcs (ie. before cleaning up old framebuffers using 1860 * drm_atomic_helper_cleanup_planes()). Compared to 1861 * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all 1862 * CRTCs, assuming that cursors-only updates are signalling their completion 1863 * immediately (or using a different path). 1864 * 1865 * This requires that drivers use the nonblocking commit tracking support 1866 * initialized using drm_atomic_helper_setup_commit(). 1867 */ 1868 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, 1869 struct drm_atomic_state *state) 1870 { 1871 struct drm_crtc *crtc; 1872 int i; 1873 1874 for (i = 0; i < dev->mode_config.num_crtc; i++) { 1875 struct drm_crtc_commit *commit = state->crtcs[i].commit; 1876 int ret; 1877 1878 crtc = state->crtcs[i].ptr; 1879 1880 if (!crtc || !commit) 1881 continue; 1882 1883 ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ); 1884 if (ret == 0) 1885 drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n", 1886 crtc->base.id, crtc->name); 1887 } 1888 1889 if (state->fake_commit) 1890 complete_all(&state->fake_commit->flip_done); 1891 } 1892 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done); 1893 1894 /** 1895 * drm_atomic_helper_commit_tail - commit atomic update to hardware 1896 * @state: atomic state object being committed 1897 * 1898 * This is the default implementation for the 1899 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers 1900 * that do not support runtime_pm or do not need the CRTC to be 1901 * enabled to perform a commit. Otherwise, see 1902 * drm_atomic_helper_commit_tail_rpm(). 
1903 * 1904 * Note that the default ordering of how the various stages are called is to 1905 * match the legacy modeset helper library closest. 1906 */ 1907 void drm_atomic_helper_commit_tail(struct drm_atomic_state *state) 1908 { 1909 struct drm_device *dev = state->dev; 1910 1911 drm_atomic_helper_commit_modeset_disables(dev, state); 1912 1913 drm_atomic_helper_commit_planes(dev, state, 0); 1914 1915 drm_atomic_helper_commit_modeset_enables(dev, state); 1916 1917 drm_atomic_helper_fake_vblank(state); 1918 1919 drm_atomic_helper_commit_hw_done(state); 1920 1921 drm_atomic_helper_wait_for_vblanks(dev, state); 1922 1923 drm_atomic_helper_cleanup_planes(dev, state); 1924 } 1925 EXPORT_SYMBOL(drm_atomic_helper_commit_tail); 1926 1927 /** 1928 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware 1929 * @state: new modeset state to be committed 1930 * 1931 * This is an alternative implementation for the 1932 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers 1933 * that support runtime_pm or need the CRTC to be enabled to perform a 1934 * commit. Otherwise, one should use the default implementation 1935 * drm_atomic_helper_commit_tail(). 1936 */ 1937 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state) 1938 { 1939 struct drm_device *dev = state->dev; 1940 1941 drm_atomic_helper_commit_modeset_disables(dev, state); 1942 1943 drm_atomic_helper_commit_modeset_enables(dev, state); 1944 1945 drm_atomic_helper_commit_planes(dev, state, 1946 DRM_PLANE_COMMIT_ACTIVE_ONLY); 1947 1948 drm_atomic_helper_fake_vblank(state); 1949 1950 drm_atomic_helper_commit_hw_done(state); 1951 1952 drm_atomic_helper_wait_for_vblanks(dev, state); 1953 1954 drm_atomic_helper_cleanup_planes(dev, state); 1955 } 1956 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm); 1957 1958 static void commit_tail(struct drm_atomic_state *state) 1959 { 1960 struct drm_device *dev = state->dev; 1961 const struct drm_mode_config_helper_funcs *funcs; 1962 struct drm_crtc_state *new_crtc_state; 1963 struct drm_crtc *crtc; 1964 ktime_t start; 1965 s64 commit_time_ms; 1966 unsigned int i, new_self_refresh_mask = 0; 1967 1968 funcs = dev->mode_config.helper_private; 1969 1970 /* 1971 * We're measuring the _entire_ commit, so the time will vary depending 1972 * on how many fences and objects are involved. For the purposes of self 1973 * refresh, this is desirable since it'll give us an idea of how 1974 * congested things are. This will inform our decision on how often we 1975 * should enter self refresh after idle. 
1976 * 1977 * These times will be averaged out in the self refresh helpers to avoid 1978 * overreacting over one outlier frame 1979 */ 1980 start = ktime_get(); 1981 1982 drm_atomic_helper_wait_for_fences(dev, state, false); 1983 1984 drm_atomic_helper_wait_for_dependencies(state); 1985 1986 /* 1987 * We cannot safely access new_crtc_state after 1988 * drm_atomic_helper_commit_hw_done() so figure out which crtc's have 1989 * self-refresh active beforehand: 1990 */ 1991 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) 1992 if (new_crtc_state->self_refresh_active) 1993 new_self_refresh_mask |= BIT(i); 1994 1995 if (funcs && funcs->atomic_commit_tail) 1996 funcs->atomic_commit_tail(state); 1997 else 1998 drm_atomic_helper_commit_tail(state); 1999 2000 commit_time_ms = ktime_ms_delta(ktime_get(), start); 2001 if (commit_time_ms > 0) 2002 drm_self_refresh_helper_update_avg_times(state, 2003 (unsigned long)commit_time_ms, 2004 new_self_refresh_mask); 2005 2006 drm_atomic_helper_commit_cleanup_done(state); 2007 2008 drm_atomic_state_put(state); 2009 } 2010 2011 static void commit_work(struct work_struct *work) 2012 { 2013 struct drm_atomic_state *state = container_of(work, 2014 struct drm_atomic_state, 2015 commit_work); 2016 commit_tail(state); 2017 } 2018 2019 /** 2020 * drm_atomic_helper_async_check - check if state can be committed asynchronously 2021 * @dev: DRM device 2022 * @state: the driver state object 2023 * 2024 * This helper will check if it is possible to commit the state asynchronously. 2025 * Async commits are not supposed to swap the states like normal sync commits 2026 * but just do in-place changes on the current state. 2027 * 2028 * It will return 0 if the commit can happen in an asynchronous fashion or error 2029 * if not. Note that error just mean it can't be committed asynchronously, if it 2030 * fails the commit should be treated like a normal synchronous commit. 2031 */ 2032 int drm_atomic_helper_async_check(struct drm_device *dev, 2033 struct drm_atomic_state *state) 2034 { 2035 struct drm_crtc *crtc; 2036 struct drm_crtc_state *crtc_state; 2037 struct drm_plane *plane = NULL; 2038 struct drm_plane_state *old_plane_state = NULL; 2039 struct drm_plane_state *new_plane_state = NULL; 2040 const struct drm_plane_helper_funcs *funcs; 2041 int i, ret, n_planes = 0; 2042 2043 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2044 if (drm_atomic_crtc_needs_modeset(crtc_state)) 2045 return -EINVAL; 2046 } 2047 2048 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) 2049 n_planes++; 2050 2051 /* FIXME: we support only single plane updates for now */ 2052 if (n_planes != 1) { 2053 drm_dbg_atomic(dev, 2054 "only single plane async updates are supported\n"); 2055 return -EINVAL; 2056 } 2057 2058 if (!new_plane_state->crtc || 2059 old_plane_state->crtc != new_plane_state->crtc) { 2060 drm_dbg_atomic(dev, 2061 "[PLANE:%d:%s] async update cannot change CRTC\n", 2062 plane->base.id, plane->name); 2063 return -EINVAL; 2064 } 2065 2066 funcs = plane->helper_private; 2067 if (!funcs->atomic_async_update) { 2068 drm_dbg_atomic(dev, 2069 "[PLANE:%d:%s] driver does not support async updates\n", 2070 plane->base.id, plane->name); 2071 return -EINVAL; 2072 } 2073 2074 if (new_plane_state->fence) { 2075 drm_dbg_atomic(dev, 2076 "[PLANE:%d:%s] missing fence for async update\n", 2077 plane->base.id, plane->name); 2078 return -EINVAL; 2079 } 2080 2081 /* 2082 * Don't do an async update if there is an outstanding commit modifying 2083 * the plane. 
This prevents our async update's changes from getting 2084 * overridden by a previous synchronous update's state. 2085 */ 2086 if (old_plane_state->commit && 2087 !try_wait_for_completion(&old_plane_state->commit->hw_done)) { 2088 drm_dbg_atomic(dev, 2089 "[PLANE:%d:%s] inflight previous commit preventing async commit\n", 2090 plane->base.id, plane->name); 2091 return -EBUSY; 2092 } 2093 2094 ret = funcs->atomic_async_check(plane, state, false); 2095 if (ret != 0) 2096 drm_dbg_atomic(dev, 2097 "[PLANE:%d:%s] driver async check failed\n", 2098 plane->base.id, plane->name); 2099 return ret; 2100 } 2101 EXPORT_SYMBOL(drm_atomic_helper_async_check); 2102 2103 /** 2104 * drm_atomic_helper_async_commit - commit state asynchronously 2105 * @dev: DRM device 2106 * @state: the driver state object 2107 * 2108 * This function commits a state asynchronously, i.e., not vblank 2109 * synchronized. It should be used on a state only when 2110 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap 2111 * the states like normal sync commits, but just do in-place changes on the 2112 * current state. 2113 * 2114 * TODO: Implement full swap instead of doing in-place changes. 2115 */ 2116 void drm_atomic_helper_async_commit(struct drm_device *dev, 2117 struct drm_atomic_state *state) 2118 { 2119 struct drm_plane *plane; 2120 struct drm_plane_state *plane_state; 2121 const struct drm_plane_helper_funcs *funcs; 2122 int i; 2123 2124 for_each_new_plane_in_state(state, plane, plane_state, i) { 2125 struct drm_framebuffer *new_fb = plane_state->fb; 2126 struct drm_framebuffer *old_fb = plane->state->fb; 2127 2128 funcs = plane->helper_private; 2129 funcs->atomic_async_update(plane, state); 2130 2131 /* 2132 * ->atomic_async_update() is supposed to update the 2133 * plane->state in-place; make sure at least common 2134 * properties have been properly updated. 2135 */ 2136 WARN_ON_ONCE(plane->state->fb != new_fb); 2137 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x); 2138 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y); 2139 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x); 2140 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y); 2141 2142 /* 2143 * Make sure the FBs have been swapped so that cleanups in the 2144 * new_state perform a cleanup in the old FB. 2145 */ 2146 WARN_ON_ONCE(plane_state->fb != old_fb); 2147 } 2148 } 2149 EXPORT_SYMBOL(drm_atomic_helper_async_commit); 2150 2151 /** 2152 * drm_atomic_helper_commit - commit validated state object 2153 * @dev: DRM device 2154 * @state: the driver state object 2155 * @nonblock: whether nonblocking behavior is requested. 2156 * 2157 * This function commits a state object that has been pre-validated with 2158 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer reservation fails. This 2159 * function implements nonblocking commits, using 2160 * drm_atomic_helper_setup_commit() and related functions. 2161 * 2162 * Committing the actual hardware state is done through the 2163 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default 2164 * implementation drm_atomic_helper_commit_tail(). 2165 * 2166 * RETURNS: 2167 * Zero for success or -errno.
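 *
 * As a sketch of the typical wiring (hypothetical foo_* names; the fb_create
 * choice is just an assumption for a GEM-based driver), this helper is plugged
 * straight into &struct drm_mode_config_funcs::
 *
 *     static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *             .fb_create = drm_gem_fb_create,
 *             .atomic_check = drm_atomic_helper_check,
 *             .atomic_commit = drm_atomic_helper_commit,
 *     };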
*/ 2169 int drm_atomic_helper_commit(struct drm_device *dev, 2170 struct drm_atomic_state *state, 2171 bool nonblock) 2172 { 2173 int ret; 2174 2175 if (state->async_update) { 2176 ret = drm_atomic_helper_prepare_planes(dev, state); 2177 if (ret) 2178 return ret; 2179 2180 drm_atomic_helper_async_commit(dev, state); 2181 drm_atomic_helper_unprepare_planes(dev, state); 2182 2183 return 0; 2184 } 2185 2186 ret = drm_atomic_helper_setup_commit(state, nonblock); 2187 if (ret) 2188 return ret; 2189 2190 INIT_WORK(&state->commit_work, commit_work); 2191 2192 ret = drm_atomic_helper_prepare_planes(dev, state); 2193 if (ret) 2194 return ret; 2195 2196 if (!nonblock) { 2197 ret = drm_atomic_helper_wait_for_fences(dev, state, true); 2198 if (ret) 2199 goto err; 2200 } 2201 2202 /* 2203 * This is the point of no return - everything below never fails except 2204 * when the hw goes bonghits. Which means we can commit the new state on 2205 * the software side now. 2206 */ 2207 2208 ret = drm_atomic_helper_swap_state(state, true); 2209 if (ret) 2210 goto err; 2211 2212 /* 2213 * Everything below can be run asynchronously without the need to grab 2214 * any modeset locks at all under one condition: It must be guaranteed 2215 * that the asynchronous work has either been cancelled (if the driver 2216 * supports it, which at least requires that the framebuffers get 2217 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed 2218 * before the new state gets committed on the software side with 2219 * drm_atomic_helper_swap_state(). 2220 * 2221 * This scheme allows new atomic state updates to be prepared and 2222 * checked in parallel to the asynchronous completion of the previous 2223 * update. Which is important since compositors need to figure out the 2224 * composition of the next frame right after having submitted the 2225 * current layout. 2226 * 2227 * NOTE: Commit work has multiple phases, first hardware commit, then 2228 * cleanup. We want them to overlap, hence need system_unbound_wq to 2229 * make sure work items don't artificially stall on each other. 2230 */ 2231 2232 drm_atomic_state_get(state); 2233 if (nonblock) 2234 queue_work(system_unbound_wq, &state->commit_work); 2235 else 2236 commit_tail(state); 2237 2238 return 0; 2239 2240 err: 2241 drm_atomic_helper_unprepare_planes(dev, state); 2242 return ret; 2243 } 2244 EXPORT_SYMBOL(drm_atomic_helper_commit); 2245 2246 /** 2247 * DOC: implementing nonblocking commit 2248 * 2249 * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence 2250 * different operations against one another. Locks, especially struct 2251 * &drm_modeset_lock, should not be held in worker threads or any other 2252 * asynchronous context used to commit the hardware state. 2253 * 2254 * drm_atomic_helper_commit() implements the recommended sequence for 2255 * nonblocking commits, using drm_atomic_helper_setup_commit() internally: 2256 * 2257 * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we 2258 * need to propagate out of memory/VRAM errors to userspace, it must be called 2259 * synchronously. 2260 * 2261 * 2. Synchronize with any outstanding nonblocking commit worker threads which 2262 * might be affected by the new state update. This is handled by 2263 * drm_atomic_helper_setup_commit(). 2264 * 2265 * Asynchronous workers need to have sufficient parallelism to be able to run 2266 * different atomic commits on different CRTCs in parallel.
The simplest way to 2267 * achieve this is by running them on the &system_unbound_wq work queue. Note 2268 * that drivers are not required to split up atomic commits and run an 2269 * individual commit in parallel - userspace is supposed to do that if it cares. 2270 * But it might be beneficial to do that for modesets, since those necessarily 2271 * must be done as one global operation, and enabling or disabling a CRTC can 2272 * take a long time. But even that is not required. 2273 * 2274 * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced 2275 * against all CRTCs therein. Therefore for atomic state updates which only flip 2276 * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs 2277 * in its atomic check code: This would prevent committing of atomic updates to 2278 * multiple CRTCs in parallel. In general, adding additional state structures 2279 * should be avoided as much as possible, because this reduces parallelism in 2280 * (nonblocking) commits, both due to locking and due to commit sequencing 2281 * requirements. 2282 * 2283 * 3. The software state is updated synchronously with 2284 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset 2285 * locks means concurrent callers never see inconsistent state. Note that commit 2286 * workers do not hold any locks; their access is only coordinated through 2287 * ordering. If workers would access state only through the pointers in the 2288 * free-standing state objects (currently not the case for any driver) then even 2289 * multiple pending commits could be in-flight at the same time. 2290 * 2291 * 4. Schedule a work item to do all subsequent steps, using the split-out 2292 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and 2293 * then cleaning up the framebuffers after the old framebuffer is no longer 2294 * being displayed. The scheduled work should synchronize against other workers 2295 * using the &drm_crtc_commit infrastructure as needed. See 2296 * drm_atomic_helper_setup_commit() for more details. 2297 */ 2298 2299 static int stall_checks(struct drm_crtc *crtc, bool nonblock) 2300 { 2301 struct drm_crtc_commit *commit, *stall_commit = NULL; 2302 bool completed = true; 2303 int i; 2304 long ret = 0; 2305 2306 spin_lock(&crtc->commit_lock); 2307 i = 0; 2308 list_for_each_entry(commit, &crtc->commit_list, commit_entry) { 2309 if (i == 0) { 2310 completed = try_wait_for_completion(&commit->flip_done); 2311 /* 2312 * Userspace is not allowed to get ahead of the previous 2313 * commit with nonblocking ones. 2314 */ 2315 if (!completed && nonblock) { 2316 spin_unlock(&crtc->commit_lock); 2317 drm_dbg_atomic(crtc->dev, 2318 "[CRTC:%d:%s] busy with a previous commit\n", 2319 crtc->base.id, crtc->name); 2320 2321 return -EBUSY; 2322 } 2323 } else if (i == 1) { 2324 stall_commit = drm_crtc_commit_get(commit); 2325 break; 2326 } 2327 2328 i++; 2329 } 2330 spin_unlock(&crtc->commit_lock); 2331 2332 if (!stall_commit) 2333 return 0; 2334 2335 /* We don't want to let commits get ahead of cleanup work too much, 2336 * stalling on 2nd previous commit means triple-buffer won't ever stall. 2337 */ 2338 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done, 2339 10*HZ); 2340 if (ret == 0) 2341 drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n", 2342 crtc->base.id, crtc->name); 2343 2344 drm_crtc_commit_put(stall_commit); 2345 2346 return ret < 0 ? 
ret : 0; 2347 } 2348 2349 static void release_crtc_commit(struct completion *completion) 2350 { 2351 struct drm_crtc_commit *commit = container_of(completion, 2352 typeof(*commit), 2353 flip_done); 2354 2355 drm_crtc_commit_put(commit); 2356 } 2357 2358 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc) 2359 { 2360 init_completion(&commit->flip_done); 2361 init_completion(&commit->hw_done); 2362 init_completion(&commit->cleanup_done); 2363 INIT_LIST_HEAD(&commit->commit_entry); 2364 kref_init(&commit->ref); 2365 commit->crtc = crtc; 2366 } 2367 2368 static struct drm_crtc_commit * 2369 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc) 2370 { 2371 if (crtc) { 2372 struct drm_crtc_state *new_crtc_state; 2373 2374 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 2375 2376 return new_crtc_state->commit; 2377 } 2378 2379 if (!state->fake_commit) { 2380 state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL); 2381 if (!state->fake_commit) 2382 return NULL; 2383 2384 init_commit(state->fake_commit, NULL); 2385 } 2386 2387 return state->fake_commit; 2388 } 2389 2390 /** 2391 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit 2392 * @state: new modeset state to be committed 2393 * @nonblock: whether nonblocking behavior is requested. 2394 * 2395 * This function prepares @state to be used by the atomic helper's support for 2396 * nonblocking commits. Drivers using the nonblocking commit infrastructure 2397 * should always call this function from their 2398 * &drm_mode_config_funcs.atomic_commit hook. 2399 * 2400 * Drivers that need to extend the commit setup to private objects can use the 2401 * &drm_mode_config_helper_funcs.atomic_commit_setup hook. 2402 * 2403 * To be able to use this support drivers need to use a few more helper 2404 * functions. drm_atomic_helper_wait_for_dependencies() must be called before 2405 * actually committing the hardware state, and for nonblocking commits this call 2406 * must be placed in the async worker. See also drm_atomic_helper_swap_state() 2407 * and its stall parameter, for when a driver's commit hooks look at the 2408 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly. 2409 * 2410 * Completion of the hardware commit step must be signalled using 2411 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed 2412 * to read or change any permanent software or hardware modeset state. The only 2413 * exception is state protected by other means than &drm_modeset_lock locks. 2414 * Only the free-standing @state with pointers to the old state structures can 2415 * be inspected, e.g. to clean up old buffers using 2416 * drm_atomic_helper_cleanup_planes(). 2417 * 2418 * At the very end, before cleaning up @state, drivers must call 2419 * drm_atomic_helper_commit_cleanup_done(). 2420 * 2421 * This is all implemented in drm_atomic_helper_commit(), giving drivers a 2422 * complete and easy-to-use default implementation of the atomic_commit() hook. 2423 * 2424 * The tracking of asynchronously executed and still pending commits is done 2425 * using the core structure &drm_crtc_commit. 2426 * 2427 * By default there's no need to clean up resources allocated by this function 2428 * explicitly: drm_atomic_state_default_clear() will take care of that 2429 * automatically. 2430 * 2431 * Returns: 2432 * 0 on success.
-EBUSY when userspace schedules nonblocking commits too fast, 2433 * -ENOMEM on allocation failures and -EINTR when a signal is pending. 2434 */ 2435 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, 2436 bool nonblock) 2437 { 2438 struct drm_crtc *crtc; 2439 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 2440 struct drm_connector *conn; 2441 struct drm_connector_state *old_conn_state, *new_conn_state; 2442 struct drm_plane *plane; 2443 struct drm_plane_state *old_plane_state, *new_plane_state; 2444 struct drm_crtc_commit *commit; 2445 const struct drm_mode_config_helper_funcs *funcs; 2446 int i, ret; 2447 2448 funcs = state->dev->mode_config.helper_private; 2449 2450 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 2451 commit = kzalloc(sizeof(*commit), GFP_KERNEL); 2452 if (!commit) 2453 return -ENOMEM; 2454 2455 init_commit(commit, crtc); 2456 2457 new_crtc_state->commit = commit; 2458 2459 ret = stall_checks(crtc, nonblock); 2460 if (ret) 2461 return ret; 2462 2463 /* 2464 * Drivers only send out events when at least either current or 2465 * new CRTC state is active. Complete right away if everything 2466 * stays off. 2467 */ 2468 if (!old_crtc_state->active && !new_crtc_state->active) { 2469 complete_all(&commit->flip_done); 2470 continue; 2471 } 2472 2473 /* Legacy cursor updates are fully unsynced. */ 2474 if (state->legacy_cursor_update) { 2475 complete_all(&commit->flip_done); 2476 continue; 2477 } 2478 2479 if (!new_crtc_state->event) { 2480 commit->event = kzalloc(sizeof(*commit->event), 2481 GFP_KERNEL); 2482 if (!commit->event) 2483 return -ENOMEM; 2484 2485 new_crtc_state->event = commit->event; 2486 } 2487 2488 new_crtc_state->event->base.completion = &commit->flip_done; 2489 new_crtc_state->event->base.completion_release = release_crtc_commit; 2490 drm_crtc_commit_get(commit); 2491 2492 commit->abort_completion = true; 2493 2494 state->crtcs[i].commit = commit; 2495 drm_crtc_commit_get(commit); 2496 } 2497 2498 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) { 2499 /* 2500 * Userspace is not allowed to get ahead of the previous 2501 * commit with nonblocking ones. 2502 */ 2503 if (nonblock && old_conn_state->commit && 2504 !try_wait_for_completion(&old_conn_state->commit->flip_done)) { 2505 drm_dbg_atomic(conn->dev, 2506 "[CONNECTOR:%d:%s] busy with a previous commit\n", 2507 conn->base.id, conn->name); 2508 2509 return -EBUSY; 2510 } 2511 2512 /* Always track connectors explicitly for e.g. link retraining. */ 2513 commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc); 2514 if (!commit) 2515 return -ENOMEM; 2516 2517 new_conn_state->commit = drm_crtc_commit_get(commit); 2518 } 2519 2520 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 2521 /* 2522 * Userspace is not allowed to get ahead of the previous 2523 * commit with nonblocking ones. 2524 */ 2525 if (nonblock && old_plane_state->commit && 2526 !try_wait_for_completion(&old_plane_state->commit->flip_done)) { 2527 drm_dbg_atomic(plane->dev, 2528 "[PLANE:%d:%s] busy with a previous commit\n", 2529 plane->base.id, plane->name); 2530 2531 return -EBUSY; 2532 } 2533 2534 /* Always track planes explicitly for async pageflip support. 
*/ 2535 commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc); 2536 if (!commit) 2537 return -ENOMEM; 2538 2539 new_plane_state->commit = drm_crtc_commit_get(commit); 2540 } 2541 2542 if (funcs && funcs->atomic_commit_setup) 2543 return funcs->atomic_commit_setup(state); 2544 2545 return 0; 2546 } 2547 EXPORT_SYMBOL(drm_atomic_helper_setup_commit); 2548 2549 /** 2550 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits 2551 * @state: atomic state object being committed 2552 * 2553 * This function waits for all preceding commits that touch the same CRTC as 2554 * @state to both be committed to the hardware (as signalled by 2555 * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled 2556 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event). 2557 * 2558 * This is part of the atomic helper support for nonblocking commits, see 2559 * drm_atomic_helper_setup_commit() for an overview. 2560 */ 2561 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state) 2562 { 2563 struct drm_crtc *crtc; 2564 struct drm_crtc_state *old_crtc_state; 2565 struct drm_plane *plane; 2566 struct drm_plane_state *old_plane_state; 2567 struct drm_connector *conn; 2568 struct drm_connector_state *old_conn_state; 2569 int i; 2570 long ret; 2571 2572 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { 2573 ret = drm_crtc_commit_wait(old_crtc_state->commit); 2574 if (ret) 2575 drm_err(crtc->dev, 2576 "[CRTC:%d:%s] commit wait timed out\n", 2577 crtc->base.id, crtc->name); 2578 } 2579 2580 for_each_old_connector_in_state(state, conn, old_conn_state, i) { 2581 ret = drm_crtc_commit_wait(old_conn_state->commit); 2582 if (ret) 2583 drm_err(conn->dev, 2584 "[CONNECTOR:%d:%s] commit wait timed out\n", 2585 conn->base.id, conn->name); 2586 } 2587 2588 for_each_old_plane_in_state(state, plane, old_plane_state, i) { 2589 ret = drm_crtc_commit_wait(old_plane_state->commit); 2590 if (ret) 2591 drm_err(plane->dev, 2592 "[PLANE:%d:%s] commit wait timed out\n", 2593 plane->base.id, plane->name); 2594 } 2595 } 2596 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); 2597 2598 /** 2599 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed 2600 * @state: atomic state object being committed 2601 * 2602 * This function walks all CRTCs and fakes VBLANK events on those with 2603 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL. 2604 * The primary use of this function is writeback connectors working in oneshot 2605 * mode and faking VBLANK events. In this case they only fake the VBLANK event 2606 * when a job is queued, and any change to the pipeline that does not touch the 2607 * connector is leading to timeouts when calling 2608 * drm_atomic_helper_wait_for_vblanks() or 2609 * drm_atomic_helper_wait_for_flip_done(). In addition to writeback 2610 * connectors, this function can also fake VBLANK events for CRTCs without 2611 * VBLANK interrupt. 2612 * 2613 * This is part of the atomic helper support for nonblocking commits, see 2614 * drm_atomic_helper_setup_commit() for an overview. 
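 *
 * A minimal sketch of a commit tail for hardware without a VBLANK interrupt
 * (hypothetical foo_* names; assumes the driver sets &drm_crtc_state.no_vblank
 * in its CRTC state handling)::
 *
 *     static void foo_commit_tail(struct drm_atomic_state *state)
 *     {
 *             struct drm_device *dev = state->dev;
 *
 *             drm_atomic_helper_commit_modeset_disables(dev, state);
 *             drm_atomic_helper_commit_planes(dev, state, 0);
 *             drm_atomic_helper_commit_modeset_enables(dev, state);
 *             // No VBLANK interrupt, so send any pending event from here.
 *             drm_atomic_helper_fake_vblank(state);
 *             drm_atomic_helper_commit_hw_done(state);
 *             drm_atomic_helper_cleanup_planes(dev, state);
 *     }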
2615 */ 2616 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state) 2617 { 2618 struct drm_crtc_state *new_crtc_state; 2619 struct drm_crtc *crtc; 2620 int i; 2621 2622 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 2623 unsigned long flags; 2624 2625 if (!new_crtc_state->no_vblank) 2626 continue; 2627 2628 spin_lock_irqsave(&state->dev->event_lock, flags); 2629 if (new_crtc_state->event) { 2630 drm_crtc_send_vblank_event(crtc, 2631 new_crtc_state->event); 2632 new_crtc_state->event = NULL; 2633 } 2634 spin_unlock_irqrestore(&state->dev->event_lock, flags); 2635 } 2636 } 2637 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank); 2638 2639 /** 2640 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step 2641 * @state: atomic state object being committed 2642 * 2643 * This function is used to signal completion of the hardware commit step. After 2644 * this step the driver is not allowed to read or change any permanent software 2645 * or hardware modeset state. The only exception is state protected by other 2646 * means than &drm_modeset_lock locks. 2647 * 2648 * Drivers should try to postpone any expensive or delayed cleanup work until after 2649 * this function is called. 2650 * 2651 * This is part of the atomic helper support for nonblocking commits, see 2652 * drm_atomic_helper_setup_commit() for an overview. 2653 */ 2654 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state) 2655 { 2656 struct drm_crtc *crtc; 2657 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 2658 struct drm_crtc_commit *commit; 2659 int i; 2660 2661 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 2662 commit = new_crtc_state->commit; 2663 if (!commit) 2664 continue; 2665 2666 /* 2667 * copy new_crtc_state->commit to old_crtc_state->commit, 2668 * it's unsafe to touch new_crtc_state after hw_done, 2669 * but we still need to do so in cleanup_done(). 2670 */ 2671 if (old_crtc_state->commit) 2672 drm_crtc_commit_put(old_crtc_state->commit); 2673 2674 old_crtc_state->commit = drm_crtc_commit_get(commit); 2675 2676 /* backend must have consumed any event by now */ 2677 WARN_ON(new_crtc_state->event); 2678 complete_all(&commit->hw_done); 2679 } 2680 2681 if (state->fake_commit) { 2682 complete_all(&state->fake_commit->hw_done); 2683 complete_all(&state->fake_commit->flip_done); 2684 } 2685 } 2686 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done); 2687 2688 /** 2689 * drm_atomic_helper_commit_cleanup_done - signal completion of commit 2690 * @state: atomic state object being committed 2691 * 2692 * This signals completion of the atomic update @state, including any 2693 * cleanup work. If used, it must be called right before calling 2694 * drm_atomic_state_put(). 2695 * 2696 * This is part of the atomic helper support for nonblocking commits, see 2697 * drm_atomic_helper_setup_commit() for an overview.
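 *
 * A compressed sketch of where the tracking calls sit in a hand-rolled commit
 * worker (hypothetical foo_* names, foo_program_hardware() stands in for the
 * driver-specific programming, error handling omitted)::
 *
 *     static void foo_commit_tail_work(struct drm_atomic_state *state)
 *     {
 *             struct drm_device *dev = state->dev;
 *
 *             drm_atomic_helper_wait_for_dependencies(state);
 *             foo_program_hardware(state);
 *             drm_atomic_helper_commit_hw_done(state);      // no new state access past this
 *             drm_atomic_helper_wait_for_flip_done(dev, state);
 *             drm_atomic_helper_cleanup_planes(dev, state);
 *             drm_atomic_helper_commit_cleanup_done(state); // right before the final put
 *             drm_atomic_state_put(state);
 *     }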
2698 */ 2699 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state) 2700 { 2701 struct drm_crtc *crtc; 2702 struct drm_crtc_state *old_crtc_state; 2703 struct drm_crtc_commit *commit; 2704 int i; 2705 2706 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { 2707 commit = old_crtc_state->commit; 2708 if (WARN_ON(!commit)) 2709 continue; 2710 2711 complete_all(&commit->cleanup_done); 2712 WARN_ON(!try_wait_for_completion(&commit->hw_done)); 2713 2714 spin_lock(&crtc->commit_lock); 2715 list_del(&commit->commit_entry); 2716 spin_unlock(&crtc->commit_lock); 2717 } 2718 2719 if (state->fake_commit) { 2720 complete_all(&state->fake_commit->cleanup_done); 2721 WARN_ON(!try_wait_for_completion(&state->fake_commit->hw_done)); 2722 } 2723 } 2724 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); 2725 2726 /** 2727 * drm_atomic_helper_prepare_planes - prepare plane resources before commit 2728 * @dev: DRM device 2729 * @state: atomic state object with new state structures 2730 * 2731 * This function prepares plane state, specifically framebuffers, for the new 2732 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure 2733 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on 2734 * any already successfully prepared framebuffer. 2735 * 2736 * Returns: 2737 * 0 on success, negative error code on failure. 2738 */ 2739 int drm_atomic_helper_prepare_planes(struct drm_device *dev, 2740 struct drm_atomic_state *state) 2741 { 2742 struct drm_connector *connector; 2743 struct drm_connector_state *new_conn_state; 2744 struct drm_plane *plane; 2745 struct drm_plane_state *new_plane_state; 2746 int ret, i, j; 2747 2748 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 2749 if (!new_conn_state->writeback_job) 2750 continue; 2751 2752 ret = drm_writeback_prepare_job(new_conn_state->writeback_job); 2753 if (ret < 0) 2754 return ret; 2755 } 2756 2757 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 2758 const struct drm_plane_helper_funcs *funcs; 2759 2760 funcs = plane->helper_private; 2761 2762 if (funcs->prepare_fb) { 2763 ret = funcs->prepare_fb(plane, new_plane_state); 2764 if (ret) 2765 goto fail_prepare_fb; 2766 } else { 2767 WARN_ON_ONCE(funcs->cleanup_fb); 2768 2769 if (!drm_core_check_feature(dev, DRIVER_GEM)) 2770 continue; 2771 2772 ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state); 2773 if (ret) 2774 goto fail_prepare_fb; 2775 } 2776 } 2777 2778 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 2779 const struct drm_plane_helper_funcs *funcs = plane->helper_private; 2780 2781 if (funcs->begin_fb_access) { 2782 ret = funcs->begin_fb_access(plane, new_plane_state); 2783 if (ret) 2784 goto fail_begin_fb_access; 2785 } 2786 } 2787 2788 return 0; 2789 2790 fail_begin_fb_access: 2791 for_each_new_plane_in_state(state, plane, new_plane_state, j) { 2792 const struct drm_plane_helper_funcs *funcs = plane->helper_private; 2793 2794 if (j >= i) 2795 continue; 2796 2797 if (funcs->end_fb_access) 2798 funcs->end_fb_access(plane, new_plane_state); 2799 } 2800 i = j; /* set i to upper limit to cleanup all planes */ 2801 fail_prepare_fb: 2802 for_each_new_plane_in_state(state, plane, new_plane_state, j) { 2803 const struct drm_plane_helper_funcs *funcs; 2804 2805 if (j >= i) 2806 continue; 2807 2808 funcs = plane->helper_private; 2809 2810 if (funcs->cleanup_fb) 2811 funcs->cleanup_fb(plane, new_plane_state); 2812 } 2813 2814 return ret; 2815 } 2816 
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes); 2817 2818 /** 2819 * drm_atomic_helper_unprepare_planes - release plane resources on aborts 2820 * @dev: DRM device 2821 * @state: atomic state object with old state structures 2822 * 2823 * This function cleans up plane state, specifically framebuffers, from the 2824 * atomic state. It undoes the effects of drm_atomic_helper_prepare_planes() 2825 * when aborting an atomic commit. For cleaning up after a successful commit 2826 * use drm_atomic_helper_cleanup_planes(). 2827 */ 2828 void drm_atomic_helper_unprepare_planes(struct drm_device *dev, 2829 struct drm_atomic_state *state) 2830 { 2831 struct drm_plane *plane; 2832 struct drm_plane_state *new_plane_state; 2833 int i; 2834 2835 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 2836 const struct drm_plane_helper_funcs *funcs = plane->helper_private; 2837 2838 if (funcs->end_fb_access) 2839 funcs->end_fb_access(plane, new_plane_state); 2840 } 2841 2842 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 2843 const struct drm_plane_helper_funcs *funcs = plane->helper_private; 2844 2845 if (funcs->cleanup_fb) 2846 funcs->cleanup_fb(plane, new_plane_state); 2847 } 2848 } 2849 EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes); 2850 2851 static bool plane_crtc_active(const struct drm_plane_state *state) 2852 { 2853 return state->crtc && state->crtc->state->active; 2854 } 2855 2856 /** 2857 * drm_atomic_helper_commit_planes - commit plane state 2858 * @dev: DRM device 2859 * @state: atomic state object being committed 2860 * @flags: flags for committing plane state 2861 * 2862 * This function commits the new plane state using the plane and atomic helper 2863 * functions for planes and CRTCs. It assumes that the atomic state has already 2864 * been pushed into the relevant object state pointers, since this step can no 2865 * longer fail. 2866 * 2867 * It still requires the global state object @state to know which planes and 2868 * crtcs need to be updated though. 2869 * 2870 * Note that this function does all plane updates across all CRTCs in one step. 2871 * If the hardware can't support this approach look at 2872 * drm_atomic_helper_commit_planes_on_crtc() instead. 2873 * 2874 * Plane parameters can be updated by applications while the associated CRTC is 2875 * disabled. The DRM/KMS core will store the parameters in the plane state, 2876 * which will be available to the driver when the CRTC is turned on. As a result 2877 * most drivers don't need to be immediately notified of plane updates for a 2878 * disabled CRTC. 2879 * 2880 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in 2881 * @flags in order not to receive plane update notifications related to a 2882 * disabled CRTC. This avoids the need to manually ignore plane updates in 2883 * driver code when the driver and/or hardware can't or just don't need to deal 2884 * with updates on disabled CRTCs, for example when supporting runtime PM. 2885 * 2886 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant 2887 * display controllers require to disable a CRTC's planes when the CRTC is 2888 * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable 2889 * call for a plane if the CRTC of the old plane state needs a modesetting 2890 * operation. Of course, the drivers need to disable the planes in their CRTC 2891 * disable callbacks since no one else would do that. 
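 *
 * As a rough sketch, a driver whose CRTC disable hook already turns off that
 * CRTC's planes would call (from its commit tail)::
 *
 *     drm_atomic_helper_commit_planes(dev, state,
 *                                     DRM_PLANE_COMMIT_ACTIVE_ONLY |
 *                                     DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);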
2892 * 2893 * The drm_atomic_helper_commit() default implementation doesn't set the 2894 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers. 2895 * This should not be copied blindly by drivers. 2896 */ 2897 void drm_atomic_helper_commit_planes(struct drm_device *dev, 2898 struct drm_atomic_state *state, 2899 uint32_t flags) 2900 { 2901 struct drm_crtc *crtc; 2902 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 2903 struct drm_plane *plane; 2904 struct drm_plane_state *old_plane_state, *new_plane_state; 2905 int i; 2906 bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY; 2907 bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET; 2908 2909 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 2910 const struct drm_crtc_helper_funcs *funcs; 2911 2912 funcs = crtc->helper_private; 2913 2914 if (!funcs || !funcs->atomic_begin) 2915 continue; 2916 2917 if (active_only && !new_crtc_state->active) 2918 continue; 2919 2920 funcs->atomic_begin(crtc, state); 2921 } 2922 2923 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 2924 const struct drm_plane_helper_funcs *funcs; 2925 bool disabling; 2926 2927 funcs = plane->helper_private; 2928 2929 if (!funcs) 2930 continue; 2931 2932 disabling = drm_atomic_plane_disabling(old_plane_state, 2933 new_plane_state); 2934 2935 if (active_only) { 2936 /* 2937 * Skip planes related to inactive CRTCs. If the plane 2938 * is enabled use the state of the current CRTC. If the 2939 * plane is being disabled use the state of the old 2940 * CRTC to avoid skipping planes being disabled on an 2941 * active CRTC. 2942 */ 2943 if (!disabling && !plane_crtc_active(new_plane_state)) 2944 continue; 2945 if (disabling && !plane_crtc_active(old_plane_state)) 2946 continue; 2947 } 2948 2949 /* 2950 * Special-case disabling the plane if drivers support it. 2951 */ 2952 if (disabling && funcs->atomic_disable) { 2953 struct drm_crtc_state *crtc_state; 2954 2955 crtc_state = old_plane_state->crtc->state; 2956 2957 if (drm_atomic_crtc_needs_modeset(crtc_state) && 2958 no_disable) 2959 continue; 2960 2961 funcs->atomic_disable(plane, state); 2962 } else if (new_plane_state->crtc || disabling) { 2963 funcs->atomic_update(plane, state); 2964 2965 if (!disabling && funcs->atomic_enable) { 2966 if (drm_atomic_plane_enabling(old_plane_state, new_plane_state)) 2967 funcs->atomic_enable(plane, state); 2968 } 2969 } 2970 } 2971 2972 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 2973 const struct drm_crtc_helper_funcs *funcs; 2974 2975 funcs = crtc->helper_private; 2976 2977 if (!funcs || !funcs->atomic_flush) 2978 continue; 2979 2980 if (active_only && !new_crtc_state->active) 2981 continue; 2982 2983 funcs->atomic_flush(crtc, state); 2984 } 2985 2986 /* 2987 * Signal end of framebuffer access here before hw_done. After hw_done, 2988 * a later commit might have already released the plane state. 
*/ 2990 for_each_old_plane_in_state(state, plane, old_plane_state, i) { 2991 const struct drm_plane_helper_funcs *funcs = plane->helper_private; 2992 2993 if (funcs->end_fb_access) 2994 funcs->end_fb_access(plane, old_plane_state); 2995 } 2996 } 2997 EXPORT_SYMBOL(drm_atomic_helper_commit_planes); 2998 2999 /** 3000 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC 3001 * @old_crtc_state: atomic state object with the old CRTC state 3002 * 3003 * This function commits the new plane state using the plane and atomic helper 3004 * functions for planes on the specific CRTC. It assumes that the atomic state 3005 * has already been pushed into the relevant object state pointers, since this 3006 * step can no longer fail. 3007 * 3008 * This function is useful when plane updates should be done CRTC-by-CRTC 3009 * instead of one global step like drm_atomic_helper_commit_planes() does. 3010 * 3011 * This function can only be safely used when planes are not allowed to move 3012 * between different CRTCs because this function doesn't handle inter-CRTC 3013 * dependencies. Callers need to ensure that either no such dependencies exist, 3014 * or that they are resolved through ordering of commit calls or through some other means. 3015 */ 3016 void 3017 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) 3018 { 3019 const struct drm_crtc_helper_funcs *crtc_funcs; 3020 struct drm_crtc *crtc = old_crtc_state->crtc; 3021 struct drm_atomic_state *old_state = old_crtc_state->state; 3022 struct drm_crtc_state *new_crtc_state = 3023 drm_atomic_get_new_crtc_state(old_state, crtc); 3024 struct drm_plane *plane; 3025 unsigned int plane_mask; 3026 3027 plane_mask = old_crtc_state->plane_mask; 3028 plane_mask |= new_crtc_state->plane_mask; 3029 3030 crtc_funcs = crtc->helper_private; 3031 if (crtc_funcs && crtc_funcs->atomic_begin) 3032 crtc_funcs->atomic_begin(crtc, old_state); 3033 3034 drm_for_each_plane_mask(plane, crtc->dev, plane_mask) { 3035 struct drm_plane_state *old_plane_state = 3036 drm_atomic_get_old_plane_state(old_state, plane); 3037 struct drm_plane_state *new_plane_state = 3038 drm_atomic_get_new_plane_state(old_state, plane); 3039 const struct drm_plane_helper_funcs *plane_funcs; 3040 bool disabling; 3041 3042 plane_funcs = plane->helper_private; 3043 3044 if (!old_plane_state || !plane_funcs) 3045 continue; 3046 3047 WARN_ON(new_plane_state->crtc && 3048 new_plane_state->crtc != crtc); 3049 3050 disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state); 3051 3052 if (disabling && plane_funcs->atomic_disable) { 3053 plane_funcs->atomic_disable(plane, old_state); 3054 } else if (new_plane_state->crtc || disabling) { 3055 plane_funcs->atomic_update(plane, old_state); 3056 3057 if (!disabling && plane_funcs->atomic_enable) { 3058 if (drm_atomic_plane_enabling(old_plane_state, new_plane_state)) 3059 plane_funcs->atomic_enable(plane, old_state); 3060 } 3061 } 3062 } 3063 3064 if (crtc_funcs && crtc_funcs->atomic_flush) 3065 crtc_funcs->atomic_flush(crtc, old_state); 3066 } 3067 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); 3068 3069 /** 3070 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes 3071 * @old_crtc_state: atomic state object with the old CRTC state 3072 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks 3073 * 3074 * Disables all planes associated with the given CRTC. This can be 3075 * used for instance in the CRTC helper atomic_disable callback to disable 3076 * all planes.
3077 * 3078 * If the @atomic parameter is set the function calls the CRTC's 3079 * atomic_begin hook before, and its atomic_flush hook after, disabling the 3080 * planes. 3081 * 3082 * It is a bug to call this function without having implemented the 3083 * &drm_plane_helper_funcs.atomic_disable plane hook. 3084 */ 3085 void 3086 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, 3087 bool atomic) 3088 { 3089 struct drm_crtc *crtc = old_crtc_state->crtc; 3090 const struct drm_crtc_helper_funcs *crtc_funcs = 3091 crtc->helper_private; 3092 struct drm_plane *plane; 3093 3094 if (atomic && crtc_funcs && crtc_funcs->atomic_begin) 3095 crtc_funcs->atomic_begin(crtc, NULL); 3096 3097 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { 3098 const struct drm_plane_helper_funcs *plane_funcs = 3099 plane->helper_private; 3100 3101 if (!plane_funcs) 3102 continue; 3103 3104 WARN_ON(!plane_funcs->atomic_disable); 3105 if (plane_funcs->atomic_disable) 3106 plane_funcs->atomic_disable(plane, NULL); 3107 } 3108 3109 if (atomic && crtc_funcs && crtc_funcs->atomic_flush) 3110 crtc_funcs->atomic_flush(crtc, NULL); 3111 } 3112 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc); 3113 3114 /** 3115 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit 3116 * @dev: DRM device 3117 * @state: atomic state object being committed 3118 * 3119 * This function cleans up plane state, specifically framebuffers, from the old 3120 * configuration. Hence the old configuration must be preserved in @state to 3121 * be able to call this function. 3122 * 3123 * This function may not be called on the new state when the atomic update 3124 * fails at any point after calling drm_atomic_helper_prepare_planes(). Use 3125 * drm_atomic_helper_unprepare_planes() in this case. 3126 */ 3127 void drm_atomic_helper_cleanup_planes(struct drm_device *dev, 3128 struct drm_atomic_state *state) 3129 { 3130 struct drm_plane *plane; 3131 struct drm_plane_state *old_plane_state; 3132 int i; 3133 3134 for_each_old_plane_in_state(state, plane, old_plane_state, i) { 3135 const struct drm_plane_helper_funcs *funcs = plane->helper_private; 3136 3137 if (funcs->cleanup_fb) 3138 funcs->cleanup_fb(plane, old_plane_state); 3139 } 3140 } 3141 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); 3142 3143 /** 3144 * drm_atomic_helper_swap_state - store atomic state into current sw state 3145 * @state: atomic state 3146 * @stall: stall for preceding commits 3147 * 3148 * This function stores the atomic state into the current state pointers in all 3149 * driver objects. It should be called after all steps that might fail have been 3150 * done and have succeeded, but before the actual hardware state is committed. 3151 * 3152 * For cleanup and error recovery the current state for all changed objects will 3153 * be swapped into @state. 3154 * 3155 * With that sequence it fits perfectly into the plane prepare/cleanup sequence: 3156 * 3157 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state. 3158 * 3159 * 2. Do any other steps that might fail. 3160 * 3161 * 3. Put the staged state into the current state pointers with this function. 3162 * 3163 * 4. Actually commit the hardware state. 3164 * 3165 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3 3166 * contains the old state. Also do any other cleanup required with that state.
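 *
 * A compressed sketch of that sequence in a blocking commit path (error
 * handling trimmed, foo_program_hardware() standing in for step 4)::
 *
 *     ret = drm_atomic_helper_prepare_planes(dev, state);   // step 1
 *     if (ret)
 *             return ret;
 *
 *     // step 2: any other steps that might fail go here
 *
 *     ret = drm_atomic_helper_swap_state(state, true);      // step 3
 *     if (ret) {
 *             drm_atomic_helper_unprepare_planes(dev, state);
 *             return ret;
 *     }
 *
 *     foo_program_hardware(state);                          // step 4
 *
 *     drm_atomic_helper_cleanup_planes(dev, state);         // step 5: @state now holds the old state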
3167 * 3168 * @stall must be set when nonblocking commits for this driver directly access 3169 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With 3170 * the current atomic helpers this is almost always the case, since the helpers 3171 * don't pass the right state structures to the callbacks. 3172 * 3173 * Returns: 3174 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the 3175 * waiting for the previous commits has been interrupted. 3176 */ 3177 int drm_atomic_helper_swap_state(struct drm_atomic_state *state, 3178 bool stall) 3179 { 3180 int i, ret; 3181 unsigned long flags = 0; 3182 struct drm_connector *connector; 3183 struct drm_connector_state *old_conn_state, *new_conn_state; 3184 struct drm_crtc *crtc; 3185 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 3186 struct drm_plane *plane; 3187 struct drm_plane_state *old_plane_state, *new_plane_state; 3188 struct drm_colorop *colorop; 3189 struct drm_colorop_state *old_colorop_state, *new_colorop_state; 3190 struct drm_crtc_commit *commit; 3191 struct drm_private_obj *obj; 3192 struct drm_private_state *old_obj_state, *new_obj_state; 3193 3194 if (stall) { 3195 /* 3196 * We have to stall for hw_done here before 3197 * drm_atomic_helper_wait_for_dependencies() because flip 3198 * depth > 1 is not yet supported by all drivers. As long as 3199 * obj->state is directly dereferenced anywhere in the drivers 3200 * atomic_commit_tail function, then it's unsafe to swap state 3201 * before drm_atomic_helper_commit_hw_done() is called. 3202 */ 3203 3204 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { 3205 commit = old_crtc_state->commit; 3206 3207 if (!commit) 3208 continue; 3209 3210 ret = wait_for_completion_interruptible(&commit->hw_done); 3211 if (ret) 3212 return ret; 3213 } 3214 3215 for_each_old_connector_in_state(state, connector, old_conn_state, i) { 3216 commit = old_conn_state->commit; 3217 3218 if (!commit) 3219 continue; 3220 3221 ret = wait_for_completion_interruptible(&commit->hw_done); 3222 if (ret) 3223 return ret; 3224 } 3225 3226 for_each_old_plane_in_state(state, plane, old_plane_state, i) { 3227 commit = old_plane_state->commit; 3228 3229 if (!commit) 3230 continue; 3231 3232 ret = wait_for_completion_interruptible(&commit->hw_done); 3233 if (ret) 3234 return ret; 3235 } 3236 } 3237 3238 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) { 3239 WARN_ON(connector->state != old_conn_state); 3240 3241 old_conn_state->state = state; 3242 new_conn_state->state = NULL; 3243 3244 state->connectors[i].state_to_destroy = old_conn_state; 3245 connector->state = new_conn_state; 3246 } 3247 3248 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 3249 WARN_ON(crtc->state != old_crtc_state); 3250 3251 old_crtc_state->state = state; 3252 new_crtc_state->state = NULL; 3253 3254 state->crtcs[i].state_to_destroy = old_crtc_state; 3255 crtc->state = new_crtc_state; 3256 3257 if (new_crtc_state->commit) { 3258 spin_lock(&crtc->commit_lock); 3259 list_add(&new_crtc_state->commit->commit_entry, 3260 &crtc->commit_list); 3261 spin_unlock(&crtc->commit_lock); 3262 3263 new_crtc_state->commit->event = NULL; 3264 } 3265 } 3266 3267 for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) { 3268 WARN_ON(colorop->state != old_colorop_state); 3269 3270 old_colorop_state->state = state; 3271 new_colorop_state->state = NULL; 3272 3273 state->colorops[i].state = old_colorop_state; 3274 
colorop->state = new_colorop_state; 3275 } 3276 3277 drm_panic_lock(state->dev, flags); 3278 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 3279 WARN_ON(plane->state != old_plane_state); 3280 3281 old_plane_state->state = state; 3282 new_plane_state->state = NULL; 3283 3284 state->planes[i].state_to_destroy = old_plane_state; 3285 plane->state = new_plane_state; 3286 } 3287 drm_panic_unlock(state->dev, flags); 3288 3289 for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) { 3290 WARN_ON(obj->state != old_obj_state); 3291 3292 old_obj_state->state = state; 3293 new_obj_state->state = NULL; 3294 3295 state->private_objs[i].state_to_destroy = old_obj_state; 3296 obj->state = new_obj_state; 3297 } 3298 3299 return 0; 3300 } 3301 EXPORT_SYMBOL(drm_atomic_helper_swap_state); 3302 3303 /** 3304 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic 3305 * @plane: plane object to update 3306 * @crtc: owning CRTC of owning plane 3307 * @fb: framebuffer to flip onto plane 3308 * @crtc_x: x offset of primary plane on @crtc 3309 * @crtc_y: y offset of primary plane on @crtc 3310 * @crtc_w: width of primary plane rectangle on @crtc 3311 * @crtc_h: height of primary plane rectangle on @crtc 3312 * @src_x: x offset of @fb for panning 3313 * @src_y: y offset of @fb for panning 3314 * @src_w: width of source rectangle in @fb 3315 * @src_h: height of source rectangle in @fb 3316 * @ctx: lock acquire context 3317 * 3318 * Provides a default plane update handler using the atomic driver interface. 3319 * 3320 * RETURNS: 3321 * Zero on success, error code on failure 3322 */ 3323 int drm_atomic_helper_update_plane(struct drm_plane *plane, 3324 struct drm_crtc *crtc, 3325 struct drm_framebuffer *fb, 3326 int crtc_x, int crtc_y, 3327 unsigned int crtc_w, unsigned int crtc_h, 3328 uint32_t src_x, uint32_t src_y, 3329 uint32_t src_w, uint32_t src_h, 3330 struct drm_modeset_acquire_ctx *ctx) 3331 { 3332 struct drm_atomic_state *state; 3333 struct drm_plane_state *plane_state; 3334 int ret = 0; 3335 3336 state = drm_atomic_state_alloc(plane->dev); 3337 if (!state) 3338 return -ENOMEM; 3339 3340 state->acquire_ctx = ctx; 3341 plane_state = drm_atomic_get_plane_state(state, plane); 3342 if (IS_ERR(plane_state)) { 3343 ret = PTR_ERR(plane_state); 3344 goto fail; 3345 } 3346 3347 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); 3348 if (ret != 0) 3349 goto fail; 3350 drm_atomic_set_fb_for_plane(plane_state, fb); 3351 plane_state->crtc_x = crtc_x; 3352 plane_state->crtc_y = crtc_y; 3353 plane_state->crtc_w = crtc_w; 3354 plane_state->crtc_h = crtc_h; 3355 plane_state->src_x = src_x; 3356 plane_state->src_y = src_y; 3357 plane_state->src_w = src_w; 3358 plane_state->src_h = src_h; 3359 3360 if (plane == crtc->cursor) 3361 state->legacy_cursor_update = true; 3362 3363 ret = drm_atomic_commit(state); 3364 fail: 3365 drm_atomic_state_put(state); 3366 return ret; 3367 } 3368 EXPORT_SYMBOL(drm_atomic_helper_update_plane); 3369 3370 /** 3371 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic 3372 * @plane: plane to disable 3373 * @ctx: lock acquire context 3374 * 3375 * Provides a default plane disable handler using the atomic driver interface. 
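 *
 * Together with drm_atomic_helper_update_plane() this is typically wired up
 * directly as the plane's legacy entry points (sketch, hypothetical foo_*
 * names)::
 *
 *     static const struct drm_plane_funcs foo_plane_funcs = {
 *             .update_plane = drm_atomic_helper_update_plane,
 *             .disable_plane = drm_atomic_helper_disable_plane,
 *             .destroy = drm_plane_cleanup,
 *             .reset = drm_atomic_helper_plane_reset,
 *             .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 *             .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 *     };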
3376 * 3377 * RETURNS: 3378 * Zero on success, error code on failure 3379 */ 3380 int drm_atomic_helper_disable_plane(struct drm_plane *plane, 3381 struct drm_modeset_acquire_ctx *ctx) 3382 { 3383 struct drm_atomic_state *state; 3384 struct drm_plane_state *plane_state; 3385 int ret = 0; 3386 3387 state = drm_atomic_state_alloc(plane->dev); 3388 if (!state) 3389 return -ENOMEM; 3390 3391 state->acquire_ctx = ctx; 3392 plane_state = drm_atomic_get_plane_state(state, plane); 3393 if (IS_ERR(plane_state)) { 3394 ret = PTR_ERR(plane_state); 3395 goto fail; 3396 } 3397 3398 if (plane_state->crtc && plane_state->crtc->cursor == plane) 3399 plane_state->state->legacy_cursor_update = true; 3400 3401 ret = __drm_atomic_helper_disable_plane(plane, plane_state); 3402 if (ret != 0) 3403 goto fail; 3404 3405 ret = drm_atomic_commit(state); 3406 fail: 3407 drm_atomic_state_put(state); 3408 return ret; 3409 } 3410 EXPORT_SYMBOL(drm_atomic_helper_disable_plane); 3411 3412 /** 3413 * drm_atomic_helper_set_config - set a new config from userspace 3414 * @set: mode set configuration 3415 * @ctx: lock acquisition context 3416 * 3417 * Provides a default CRTC set_config handler using the atomic driver interface. 3418 * 3419 * NOTE: For backwards compatibility with old userspace this automatically 3420 * resets the "link-status" property to GOOD, to force any link 3421 * re-training. The SETCRTC ioctl does not define whether an update does 3422 * need a full modeset or just a plane update, hence we're allowed to do 3423 * that. See also drm_connector_set_link_status_property(). 3424 * 3425 * Returns: 3426 * Returns 0 on success, negative errno numbers on failure. 3427 */ 3428 int drm_atomic_helper_set_config(struct drm_mode_set *set, 3429 struct drm_modeset_acquire_ctx *ctx) 3430 { 3431 struct drm_atomic_state *state; 3432 struct drm_crtc *crtc = set->crtc; 3433 int ret = 0; 3434 3435 state = drm_atomic_state_alloc(crtc->dev); 3436 if (!state) 3437 return -ENOMEM; 3438 3439 state->acquire_ctx = ctx; 3440 ret = __drm_atomic_helper_set_config(set, state); 3441 if (ret != 0) 3442 goto fail; 3443 3444 ret = handle_conflicting_encoders(state, true); 3445 if (ret) 3446 goto fail; 3447 3448 ret = drm_atomic_commit(state); 3449 3450 fail: 3451 drm_atomic_state_put(state); 3452 return ret; 3453 } 3454 EXPORT_SYMBOL(drm_atomic_helper_set_config); 3455 3456 /** 3457 * drm_atomic_helper_disable_all - disable all currently active outputs 3458 * @dev: DRM device 3459 * @ctx: lock acquisition context 3460 * 3461 * Loops through all connectors, finding those that aren't turned off and then 3462 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC 3463 * that they are connected to. 3464 * 3465 * This is used for example in suspend/resume to disable all currently active 3466 * functions when suspending. If you just want to shut down everything at e.g. 3467 * driver unload, look at drm_atomic_helper_shutdown(). 3468 * 3469 * Note that if callers haven't already acquired all modeset locks this might 3470 * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). 3471 * 3472 * Returns: 3473 * 0 on success or a negative error code on failure. 3474 * 3475 * See also: 3476 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and 3477 * drm_atomic_helper_shutdown(). 
 */
int drm_atomic_helper_disable_all(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret, i;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto free;
		}

		crtc_state->active = false;

		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret < 0)
			goto free;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
		if (ret < 0)
			goto free;
	}

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret < 0)
			goto free;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	ret = drm_atomic_commit(state);
free:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);

/**
 * drm_atomic_helper_reset_crtc - reset the active outputs of a CRTC
 * @crtc: DRM CRTC
 * @ctx: lock acquisition context
 *
 * Reset the active outputs by indicating that connectors have changed.
 * This implies a reset of all active components available between the CRTC and
 * connectors.
 *
 * A bridge-specific variant of this function exists as
 * drm_bridge_helper_reset_crtc().
 *
 * NOTE: This relies on setting &drm_crtc_state.connectors_changed.
 * For drivers which optimize out unnecessary modesets this will result in
 * a no-op commit, achieving nothing.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	crtc_state->connectors_changed = true;

	ret = drm_atomic_commit(state);
out:
	drm_atomic_state_put(state);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_reset_crtc);

/**
 * drm_atomic_helper_shutdown - shut down all CRTCs
 * @dev: DRM device
 *
 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
 * suspend should instead be handled with drm_atomic_helper_suspend(), since
 * that also takes a snapshot of the modeset state to be restored on resume.
 *
 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
 * and it is the atomic version of drm_helper_force_disable_all().
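 *
 * Drivers typically call this at the end of their unbind/remove path, after
 * the device has been unregistered. A minimal sketch (the "foo" names are
 * hypothetical, not part of this helper library):
 *
 * .. code-block:: c
 *
 *     static void foo_remove(struct platform_device *pdev)
 *     {
 *             struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *             drm_dev_unregister(drm);
 *             drm_atomic_helper_shutdown(drm);
 *     }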
 */
void drm_atomic_helper_shutdown(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (dev == NULL)
		return;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	ret = drm_atomic_helper_disable_all(dev, &ctx);
	if (ret)
		drm_err(dev,
			"Disabling all crtc's during unload failed with %i\n",
			ret);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);

/**
 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Makes a copy of the current atomic state by looping over all objects and
 * duplicating their respective states. This is used for example by
 * suspend/resume support code to save the state prior to suspend such that it
 * can be restored upon resume.
 *
 * Note that this treats atomic state as persistent between save and restore.
 * Drivers must make sure that this is possible and won't result in confusion
 * or erroneous behaviour.
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * A pointer to the copy of the atomic state object on success or an
 * ERR_PTR()-encoded error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
 */
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->acquire_ctx = ctx;
	state->duplicated = true;

	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto free;
		}
	}

	drm_for_each_plane(plane, dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			err = PTR_ERR(plane_state);
			goto free;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			drm_connector_list_iter_end(&conn_iter);
			goto free;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* clear the acquire context so that it isn't accidentally reused */
	state->acquire_ctx = NULL;

free:
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
	}

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);

/**
 * drm_atomic_helper_suspend - subsystem-level suspend helper
 * @dev: DRM device
 *
 * Duplicates the current atomic state, disables all active outputs and then
 * returns a pointer to the duplicated (pre-suspend) atomic state to the caller.
 * Drivers can pass this pointer to the drm_atomic_helper_resume() helper upon
 * resume to restore the output configuration that was active at the time the
 * system entered suspend.
 *
 * Note that it is potentially unsafe to use this. The atomic state object
 * returned by this function is assumed to be persistent. Drivers must ensure
 * that this holds true. Before calling this function, drivers must make sure
 * to suspend fbdev emulation so that nothing can be using the device.
 *
 * Returns:
 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
 * encoded error code on failure. Drivers should store the returned atomic
 * state object and pass it to the drm_atomic_helper_resume() helper upon
 * resume.
 *
 * See also:
 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
 */
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int err;

	/* This can never be returned, but it makes the compiler happy */
	state = ERR_PTR(-EINVAL);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (IS_ERR(state))
		goto unlock;

	err = drm_atomic_helper_disable_all(dev, &ctx);
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
		goto unlock;
	}

unlock:
	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	if (err)
		return ERR_PTR(err);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);

/**
 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
 * @state: duplicated atomic state to commit
 * @ctx: pointer to acquire_ctx to use for commit
 *
 * The state returned by drm_atomic_helper_duplicate_state() and
 * drm_atomic_helper_suspend() is partially invalid, and needs to
 * be fixed up before commit.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
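 *
 * The usual consumer is a driver's system-suspend/resume path built on top of
 * drm_atomic_helper_suspend() and drm_atomic_helper_resume(), the latter of
 * which uses this function internally. A minimal sketch (the "foo" names are
 * hypothetical, not part of this helper library):
 *
 * .. code-block:: c
 *
 *     static int foo_pm_suspend(struct device *dev)
 *     {
 *             struct foo_device *foo = dev_get_drvdata(dev);
 *
 *             foo->saved_state = drm_atomic_helper_suspend(foo->drm);
 *             if (IS_ERR(foo->saved_state))
 *                     return PTR_ERR(foo->saved_state);
 *
 *             return 0;
 *     }
 *
 *     static int foo_pm_resume(struct device *dev)
 *     {
 *             struct foo_device *foo = dev_get_drvdata(dev);
 *
 *             return drm_atomic_helper_resume(foo->drm, foo->saved_state);
 *     }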
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
					      struct drm_modeset_acquire_ctx *ctx)
{
	int i, ret;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;

	state->acquire_ctx = ctx;

	for_each_new_plane_in_state(state, plane, new_plane_state, i)
		state->planes[i].old_state = plane->state;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		state->crtcs[i].old_state = crtc->state;

	for_each_new_connector_in_state(state, connector, new_conn_state, i)
		state->connectors[i].old_state = connector->state;

	ret = drm_atomic_commit(state);

	state->acquire_ctx = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);

/**
 * drm_atomic_helper_resume - subsystem-level resume helper
 * @dev: DRM device
 * @state: atomic state to resume to
 *
 * Calls drm_mode_config_reset() to synchronize hardware and software states,
 * grabs all modeset locks and commits the atomic state object. This can be
 * used in conjunction with the drm_atomic_helper_suspend() helper to
 * implement suspend/resume for drivers that support atomic mode-setting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_resume(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_mode_config_reset(dev);

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);

	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);

	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
	drm_atomic_state_put(state);

	return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);

static int page_flip_common(struct drm_atomic_state *state,
			    struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    struct drm_pending_vblank_event *event,
			    uint32_t flags)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->event = event;
	crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Make sure we don't accidentally do a full modeset. */
	state->allow_modeset = false;
	if (!crtc_state->active) {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] disabled, rejecting legacy flip\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return ret;
}

/**
 * drm_atomic_helper_page_flip - execute a legacy page flip
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip implementation
 * using the atomic driver interface.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 *
 * See also:
 * drm_atomic_helper_page_flip_target()
 */
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);

/**
 * drm_atomic_helper_page_flip_target - execute a page flip on a target vblank period
 * @crtc: DRM CRTC
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @target: target vblank period when the flip should take effect
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
 * Similar to drm_atomic_helper_page_flip(), but with an extra parameter
 * specifying the target vblank period when the flip should take effect.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
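 *
 * Like drm_atomic_helper_page_flip(), this is meant to be plugged directly
 * into &drm_crtc_funcs. A minimal sketch (the "foo" prefix is a hypothetical
 * driver name, not part of this helper library):
 *
 * .. code-block:: c
 *
 *     static const struct drm_crtc_funcs foo_crtc_funcs = {
 *             .set_config             = drm_atomic_helper_set_config,
 *             .page_flip              = drm_atomic_helper_page_flip,
 *             .page_flip_target       = drm_atomic_helper_page_flip_target,
 *             .reset                  = drm_atomic_helper_crtc_reset,
 *             .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *             .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
 *     };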
 */
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
				       struct drm_framebuffer *fb,
				       struct drm_pending_vblank_event *event,
				       uint32_t flags,
				       uint32_t target,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state)) {
		ret = -EINVAL;
		goto fail;
	}
	crtc_state->target_vblank = target;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);

/**
 * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
 * the input end of a bridge
 * @bridge: bridge control structure
 * @bridge_state: new bridge state
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 * @output_fmt: tested output bus format
 * @num_input_fmts: will contain the size of the returned array
 *
 * This helper is a pluggable implementation of the
 * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
 * modify the bus configuration between their input and their output. It
 * returns an array of input formats with a single element set to @output_fmt.
 *
 * RETURNS:
 * a valid format array of size @num_input_fmts, or NULL if the allocation
 * failed
 */
u32 *
drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
					   struct drm_bridge_state *bridge_state,
					   struct drm_crtc_state *crtc_state,
					   struct drm_connector_state *conn_state,
					   u32 output_fmt,
					   unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts) {
		*num_input_fmts = 0;
		return NULL;
	}

	*num_input_fmts = 1;
	input_fmts[0] = output_fmt;
	return input_fmts;
}
EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
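
/*
 * Usage note (illustrative only, the "foo" names are hypothetical): a bridge
 * that forwards the bus format unchanged can plug the helper above straight
 * into its &drm_bridge_funcs:
 *
 *     static const struct drm_bridge_funcs foo_bridge_funcs = {
 *             .atomic_duplicate_state    = drm_atomic_helper_bridge_duplicate_state,
 *             .atomic_destroy_state      = drm_atomic_helper_bridge_destroy_state,
 *             .atomic_reset              = drm_atomic_helper_bridge_reset,
 *             .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 *     };
 */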