1 /*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28 #include <linux/export.h>
29 #include <linux/dma-fence.h>
30 #include <linux/ktime.h>
31
32 #include <drm/drm_atomic.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_atomic_uapi.h>
35 #include <drm/drm_blend.h>
36 #include <drm/drm_bridge.h>
37 #include <drm/drm_colorop.h>
38 #include <drm/drm_damage_helper.h>
39 #include <drm/drm_device.h>
40 #include <drm/drm_drv.h>
41 #include <drm/drm_framebuffer.h>
42 #include <drm/drm_gem_atomic_helper.h>
43 #include <drm/drm_panic.h>
44 #include <drm/drm_print.h>
45 #include <drm/drm_self_refresh_helper.h>
46 #include <drm/drm_vblank.h>
47 #include <drm/drm_writeback.h>
48
49 #include "drm_crtc_helper_internal.h"
50 #include "drm_crtc_internal.h"
51
52 /**
53 * DOC: overview
54 *
55 * This helper library provides implementations of check and commit functions on
56 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
57 * also provides convenience implementations for the atomic state handling
58 * callbacks for drivers which don't need to subclass the drm core structures to
59 * add their own additional internal state.
60 *
61 * This library also provides default implementations for the check callback in
62 * drm_atomic_helper_check() and for the commit callback with
63 * drm_atomic_helper_commit(). But the individual stages and callbacks are
64 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
65 * together with a driver private modeset implementation.
66 *
67 * This library also provides implementations for all the legacy driver
68 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
69 * drm_atomic_helper_disable_plane(), and the various functions to implement
70 * set_property callbacks. New drivers must not implement these functions
71 * themselves but must use the provided helpers.
72 *
73 * The atomic helper uses the same function table structures as all other
74 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
75 * struct &drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
76 * also shares the &struct drm_plane_helper_funcs function table with the plane
77 * helpers.
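 *
 * As a usage illustration only (the my_driver_ prefix is hypothetical and
 * drm_gem_fb_create() is just one possible .fb_create choice), a driver that
 * relies fully on the atomic helpers typically wires them up like this:
 *
 *	static const struct drm_mode_config_funcs my_driver_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};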
78 */
79 static void
80 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
81 struct drm_plane_state *old_plane_state,
82 struct drm_plane_state *plane_state,
83 struct drm_plane *plane)
84 {
85 struct drm_crtc_state *crtc_state;
86
87 if (old_plane_state->crtc) {
88 crtc_state = drm_atomic_get_new_crtc_state(state,
89 old_plane_state->crtc);
90
91 if (WARN_ON(!crtc_state))
92 return;
93
94 crtc_state->planes_changed = true;
95 }
96
97 if (plane_state->crtc) {
98 crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
99
100 if (WARN_ON(!crtc_state))
101 return;
102
103 crtc_state->planes_changed = true;
104 }
105 }
106
107 static int handle_conflicting_encoders(struct drm_atomic_state *state,
108 bool disable_conflicting_encoders)
109 {
110 struct drm_connector_state *new_conn_state;
111 struct drm_connector *connector;
112 struct drm_connector_list_iter conn_iter;
113 struct drm_encoder *encoder;
114 unsigned int encoder_mask = 0;
115 int i, ret = 0;
116
117 /*
118 * First loop, find all newly assigned encoders from the connectors
119 * part of the state. If the same encoder is assigned to multiple
120 * connectors bail out.
121 */
122 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
123 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
124 struct drm_encoder *new_encoder;
125
126 if (!new_conn_state->crtc)
127 continue;
128
129 if (funcs->atomic_best_encoder)
130 new_encoder = funcs->atomic_best_encoder(connector,
131 state);
132 else if (funcs->best_encoder)
133 new_encoder = funcs->best_encoder(connector);
134 else
135 new_encoder = drm_connector_get_single_encoder(connector);
136
137 if (new_encoder) {
138 if (encoder_mask & drm_encoder_mask(new_encoder)) {
139 drm_dbg_atomic(connector->dev,
140 "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
141 new_encoder->base.id, new_encoder->name,
142 connector->base.id, connector->name);
143
144 return -EINVAL;
145 }
146
147 encoder_mask |= drm_encoder_mask(new_encoder);
148 }
149 }
150
151 if (!encoder_mask)
152 return 0;
153
154 /*
155 * Second loop, iterate over all connectors not part of the state.
156 *
157 * If a conflicting encoder is found and disable_conflicting_encoders
158 * is not set, an error is returned. Userspace can provide a solution
159 * through the atomic ioctl.
160 *
161 * If the flag is set conflicting connectors are removed from the CRTC
162 * and the CRTC is disabled if no encoder is left. This preserves
163 * compatibility with the legacy set_config behavior.
164 */
165 drm_connector_list_iter_begin(state->dev, &conn_iter);
166 drm_for_each_connector_iter(connector, &conn_iter) {
167 struct drm_crtc_state *crtc_state;
168
169 if (drm_atomic_get_new_connector_state(state, connector))
170 continue;
171
172 encoder = connector->state->best_encoder;
173 if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
174 continue;
175
176 if (!disable_conflicting_encoders) {
177 drm_dbg_atomic(connector->dev,
178 "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
179 encoder->base.id, encoder->name,
180 connector->state->crtc->base.id,
181 connector->state->crtc->name,
182 connector->base.id, connector->name);
183 ret = -EINVAL;
184 goto out;
185 }
186
187 new_conn_state = drm_atomic_get_connector_state(state, connector);
188 if (IS_ERR(new_conn_state)) {
189 ret = PTR_ERR(new_conn_state);
190 goto out;
191 }
192
193 drm_dbg_atomic(connector->dev,
194 "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
195 encoder->base.id, encoder->name,
196 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
197 connector->base.id, connector->name);
198
199 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
200
201 ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
202 if (ret)
203 goto out;
204
205 if (!crtc_state->connector_mask) {
206 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
207 NULL);
208 if (ret < 0)
209 goto out;
210
211 crtc_state->active = false;
212 }
213 }
214 out:
215 drm_connector_list_iter_end(&conn_iter);
216
217 return ret;
218 }
219
220 static void
221 set_best_encoder(struct drm_atomic_state *state,
222 struct drm_connector_state *conn_state,
223 struct drm_encoder *encoder)
224 {
225 struct drm_crtc_state *crtc_state;
226 struct drm_crtc *crtc;
227
228 if (conn_state->best_encoder) {
229 /* Unset the encoder_mask in the old crtc state. */
230 crtc = conn_state->connector->state->crtc;
231
232 /* A NULL crtc is an error here because we should have
233 * duplicated a NULL best_encoder when crtc was NULL.
234 * As an exception restoring duplicated atomic state
235 * during resume is allowed, so don't warn when
236 * best_encoder is equal to encoder we intend to set.
237 */
238 WARN_ON(!crtc && encoder != conn_state->best_encoder);
239 if (crtc) {
240 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
241
242 crtc_state->encoder_mask &=
243 ~drm_encoder_mask(conn_state->best_encoder);
244 }
245 }
246
247 if (encoder) {
248 crtc = conn_state->crtc;
249 WARN_ON(!crtc);
250 if (crtc) {
251 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
252
253 crtc_state->encoder_mask |=
254 drm_encoder_mask(encoder);
255 }
256 }
257
258 conn_state->best_encoder = encoder;
259 }
260
261 static void
262 steal_encoder(struct drm_atomic_state *state,
263 struct drm_encoder *encoder)
264 {
265 struct drm_crtc_state *crtc_state;
266 struct drm_connector *connector;
267 struct drm_connector_state *old_connector_state, *new_connector_state;
268 int i;
269
270 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
271 struct drm_crtc *encoder_crtc;
272
273 if (new_connector_state->best_encoder != encoder)
274 continue;
275
276 encoder_crtc = old_connector_state->crtc;
277
278 drm_dbg_atomic(encoder->dev,
279 "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
280 encoder->base.id, encoder->name,
281 encoder_crtc->base.id, encoder_crtc->name);
282
283 set_best_encoder(state, new_connector_state, NULL);
284
285 crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
286 crtc_state->connectors_changed = true;
287
288 return;
289 }
290 }
291
292 static int
293 update_connector_routing(struct drm_atomic_state *state,
294 struct drm_connector *connector,
295 struct drm_connector_state *old_connector_state,
296 struct drm_connector_state *new_connector_state,
297 bool added_by_user)
298 {
299 const struct drm_connector_helper_funcs *funcs;
300 struct drm_encoder *new_encoder;
301 struct drm_crtc_state *crtc_state;
302
303 drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
304 connector->base.id, connector->name);
305
306 if (old_connector_state->crtc != new_connector_state->crtc) {
307 if (old_connector_state->crtc) {
308 crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
309 crtc_state->connectors_changed = true;
310 }
311
312 if (new_connector_state->crtc) {
313 crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
314 crtc_state->connectors_changed = true;
315 }
316 }
317
318 if (!new_connector_state->crtc) {
319 drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
320 connector->base.id, connector->name);
321
322 set_best_encoder(state, new_connector_state, NULL);
323
324 return 0;
325 }
326
327 crtc_state = drm_atomic_get_new_crtc_state(state,
328 new_connector_state->crtc);
329 /*
330 * For compatibility with legacy users, we want to make sure that
331 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
332 * which would result in anything else must be considered invalid, to
333 * avoid turning on new displays on dead connectors.
334 *
335 * Since the connector can be unregistered at any point during an
336 * atomic check or commit, this is racy. But that's OK: all we care
337 * about is ensuring that userspace can't do anything but shut off the
338 * display on a connector that was destroyed after it's been notified,
339 * not before.
340 *
341 * Additionally, we also want to ignore connector registration when
342 * we're trying to restore an atomic state during system resume since
343 * there's a chance the connector may have been destroyed during the
344 * process, but it's better to ignore that than to cause
345 * drm_atomic_helper_resume() to fail.
346 *
347 * Last, we want to ignore connector registration when the connector
348 * was not pulled in the atomic state by user-space (i.e., was pulled
349 * in by the driver, e.g. when updating a DP-MST stream).
350 */
351 if (!state->duplicated && drm_connector_is_unregistered(connector) &&
352 added_by_user && crtc_state->active) {
353 drm_dbg_atomic(connector->dev,
354 "[CONNECTOR:%d:%s] is not registered\n",
355 connector->base.id, connector->name);
356 return -EINVAL;
357 }
358
359 funcs = connector->helper_private;
360
361 if (funcs->atomic_best_encoder)
362 new_encoder = funcs->atomic_best_encoder(connector, state);
363 else if (funcs->best_encoder)
364 new_encoder = funcs->best_encoder(connector);
365 else
366 new_encoder = drm_connector_get_single_encoder(connector);
367
368 if (!new_encoder) {
369 drm_dbg_atomic(connector->dev,
370 "No suitable encoder found for [CONNECTOR:%d:%s]\n",
371 connector->base.id, connector->name);
372 return -EINVAL;
373 }
374
375 if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
376 drm_dbg_atomic(connector->dev,
377 "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
378 new_encoder->base.id,
379 new_encoder->name,
380 new_connector_state->crtc->base.id,
381 new_connector_state->crtc->name);
382 return -EINVAL;
383 }
384
385 if (new_encoder == new_connector_state->best_encoder) {
386 set_best_encoder(state, new_connector_state, new_encoder);
387
388 drm_dbg_atomic(connector->dev,
389 "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
390 connector->base.id,
391 connector->name,
392 new_encoder->base.id,
393 new_encoder->name,
394 new_connector_state->crtc->base.id,
395 new_connector_state->crtc->name);
396
397 return 0;
398 }
399
400 steal_encoder(state, new_encoder);
401
402 set_best_encoder(state, new_connector_state, new_encoder);
403
404 crtc_state->connectors_changed = true;
405
406 drm_dbg_atomic(connector->dev,
407 "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
408 connector->base.id,
409 connector->name,
410 new_encoder->base.id,
411 new_encoder->name,
412 new_connector_state->crtc->base.id,
413 new_connector_state->crtc->name);
414
415 return 0;
416 }
417
418 static int
419 mode_fixup(struct drm_atomic_state *state)
420 {
421 struct drm_crtc *crtc;
422 struct drm_crtc_state *new_crtc_state;
423 struct drm_connector *connector;
424 struct drm_connector_state *new_conn_state;
425 int i;
426 int ret;
427
428 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
429 if (!new_crtc_state->mode_changed &&
430 !new_crtc_state->connectors_changed)
431 continue;
432
433 drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
434 }
435
436 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
437 const struct drm_encoder_helper_funcs *funcs;
438 struct drm_encoder *encoder;
439 struct drm_bridge *bridge;
440
441 WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
442
443 if (!new_conn_state->crtc || !new_conn_state->best_encoder)
444 continue;
445
446 new_crtc_state =
447 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
448
449 /*
450 * Each encoder has at most one connector (since we always steal
451 * it away), so we won't call ->mode_fixup twice.
452 */
453 encoder = new_conn_state->best_encoder;
454 funcs = encoder->helper_private;
455
456 bridge = drm_bridge_chain_get_first_bridge(encoder);
457 ret = drm_atomic_bridge_chain_check(bridge,
458 new_crtc_state,
459 new_conn_state);
460 drm_bridge_put(bridge);
461 if (ret) {
462 drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
463 return ret;
464 }
465
466 if (funcs && funcs->atomic_check) {
467 ret = funcs->atomic_check(encoder, new_crtc_state,
468 new_conn_state);
469 if (ret) {
470 drm_dbg_atomic(encoder->dev,
471 "[ENCODER:%d:%s] check failed\n",
472 encoder->base.id, encoder->name);
473 return ret;
474 }
475 } else if (funcs && funcs->mode_fixup) {
476 ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
477 &new_crtc_state->adjusted_mode);
478 if (!ret) {
479 drm_dbg_atomic(encoder->dev,
480 "[ENCODER:%d:%s] fixup failed\n",
481 encoder->base.id, encoder->name);
482 return -EINVAL;
483 }
484 }
485 }
486
487 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
488 const struct drm_crtc_helper_funcs *funcs;
489
490 if (!new_crtc_state->enable)
491 continue;
492
493 if (!new_crtc_state->mode_changed &&
494 !new_crtc_state->connectors_changed)
495 continue;
496
497 funcs = crtc->helper_private;
498 if (!funcs || !funcs->mode_fixup)
499 continue;
500
501 ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
502 &new_crtc_state->adjusted_mode);
503 if (!ret) {
504 drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n",
505 crtc->base.id, crtc->name);
506 return -EINVAL;
507 }
508 }
509
510 return 0;
511 }
512
513 static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
514 struct drm_encoder *encoder,
515 struct drm_crtc *crtc,
516 const struct drm_display_mode *mode)
517 {
518 struct drm_bridge *bridge;
519 enum drm_mode_status ret;
520
521 ret = drm_encoder_mode_valid(encoder, mode);
522 if (ret != MODE_OK) {
523 drm_dbg_atomic(encoder->dev,
524 "[ENCODER:%d:%s] mode_valid() failed\n",
525 encoder->base.id, encoder->name);
526 return ret;
527 }
528
529 bridge = drm_bridge_chain_get_first_bridge(encoder);
530 ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
531 mode);
532 drm_bridge_put(bridge);
533 if (ret != MODE_OK) {
534 drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
535 return ret;
536 }
537
538 ret = drm_crtc_mode_valid(crtc, mode);
539 if (ret != MODE_OK) {
540 drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n",
541 crtc->base.id, crtc->name);
542 return ret;
543 }
544
545 return ret;
546 }
547
548 static int
549 mode_valid(struct drm_atomic_state *state)
550 {
551 struct drm_connector_state *conn_state;
552 struct drm_connector *connector;
553 int i;
554
555 for_each_new_connector_in_state(state, connector, conn_state, i) {
556 struct drm_encoder *encoder = conn_state->best_encoder;
557 struct drm_crtc *crtc = conn_state->crtc;
558 struct drm_crtc_state *crtc_state;
559 enum drm_mode_status mode_status;
560 const struct drm_display_mode *mode;
561
562 if (!crtc || !encoder)
563 continue;
564
565 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
566 if (!crtc_state)
567 continue;
568 if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
569 continue;
570
571 mode = &crtc_state->mode;
572
573 mode_status = mode_valid_path(connector, encoder, crtc, mode);
574 if (mode_status != MODE_OK)
575 return -EINVAL;
576 }
577
578 return 0;
579 }
580
581 static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
582 struct drm_crtc *crtc)
583 {
584 struct drm_encoder *drm_enc;
585 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
586 crtc);
587
588 drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
589 if (!drm_enc->possible_clones) {
590 DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
591 continue;
592 }
593
594 if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
595 crtc_state->encoder_mask) {
596 DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
597 crtc->base.id, crtc_state->encoder_mask);
598 return -EINVAL;
599 }
600 }
601
602 return 0;
603 }
604
605 /**
606 * drm_atomic_helper_check_modeset - validate state object for modeset changes
607 * @dev: DRM device
608 * @state: the driver state object
609 *
610 * Check the state object to see if the requested state is physically possible.
611 * This does all the CRTC and connector related computations for an atomic
612 * update and adds any additional connectors needed for full modesets. It calls
613 * the various per-object callbacks in the following order:
614 *
615 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
616 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
617 * 3. If it's determined a modeset is needed then all connectors on the affected
618 * CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
619 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
620 * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
621 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
622 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
623 * This function is only called when the encoder will be part of a configured CRTC,
624 * it must not be used for implementing connector property validation.
625 * If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
626 * instead.
627 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
628 *
629 * &drm_crtc_state.mode_changed is set when the input mode is changed.
630 * &drm_crtc_state.connectors_changed is set when a connector is added or
631 * removed from the CRTC. &drm_crtc_state.active_changed is set when
632 * &drm_crtc_state.active changes, which is used for DPMS.
633 * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
634 * See also: drm_atomic_crtc_needs_modeset()
635 *
636 * IMPORTANT:
637 *
638 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
639 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
640 * without a full modeset) _must_ call this function after that change. It is
641 * permitted to call this function multiple times for the same update, e.g.
642 * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
643 * adjusted dotclock for fifo space allocation and watermark computation.
644 *
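 * As an illustration only (my_atomic_check() is a hypothetical name), a driver
 * &drm_mode_config_funcs.atomic_check implementation honouring this rule could
 * look like:
 *
 *	static int my_atomic_check(struct drm_device *dev,
 *				   struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check_modeset(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_atomic_helper_check_planes(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_atomic_helper_check_modeset(dev, state);
 *	}
 *
 * The final drm_atomic_helper_check_modeset() call picks up any
 * &drm_crtc_state.mode_changed flags set by the plane checks.
 *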
645 * RETURNS:
646 * Zero for success or -errno
647 */
648 int
649 drm_atomic_helper_check_modeset(struct drm_device *dev,
650 struct drm_atomic_state *state)
651 {
652 struct drm_crtc *crtc;
653 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
654 struct drm_connector *connector;
655 struct drm_connector_state *old_connector_state, *new_connector_state;
656 int i, ret;
657 unsigned int connectors_mask = 0, user_connectors_mask = 0;
658
659 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
660 user_connectors_mask |= BIT(i);
661
662 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
663 bool has_connectors =
664 !!new_crtc_state->connector_mask;
665
666 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
667
668 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
669 drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n",
670 crtc->base.id, crtc->name);
671 new_crtc_state->mode_changed = true;
672 }
673
674 if (old_crtc_state->enable != new_crtc_state->enable) {
675 drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n",
676 crtc->base.id, crtc->name);
677
678 /*
679 * For clarity this assignment is done here, but
680 * enable == 0 is only true when there are no
681 * connectors and a NULL mode.
682 *
683 * The other way around is true as well. enable != 0
684 * implies that connectors are attached and a mode is set.
685 */
686 new_crtc_state->mode_changed = true;
687 new_crtc_state->connectors_changed = true;
688 }
689
690 if (old_crtc_state->active != new_crtc_state->active) {
691 drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n",
692 crtc->base.id, crtc->name);
693 new_crtc_state->active_changed = true;
694 }
695
696 if (new_crtc_state->enable != has_connectors) {
697 drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch (%d/%d)\n",
698 crtc->base.id, crtc->name,
699 new_crtc_state->enable, has_connectors);
700
701 return -EINVAL;
702 }
703
704 if (drm_dev_has_vblank(dev))
705 new_crtc_state->no_vblank = false;
706 else
707 new_crtc_state->no_vblank = true;
708 }
709
710 ret = handle_conflicting_encoders(state, false);
711 if (ret)
712 return ret;
713
714 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
715 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
716
717 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
718
719 /*
720 * This only sets crtc->connectors_changed for routing changes,
721 * drivers must set crtc->connectors_changed themselves when
722 * connector properties need to be updated.
723 */
724 ret = update_connector_routing(state, connector,
725 old_connector_state,
726 new_connector_state,
727 BIT(i) & user_connectors_mask);
728 if (ret)
729 return ret;
730 if (old_connector_state->crtc) {
731 new_crtc_state = drm_atomic_get_new_crtc_state(state,
732 old_connector_state->crtc);
733 if (old_connector_state->link_status !=
734 new_connector_state->link_status)
735 new_crtc_state->connectors_changed = true;
736
737 if (old_connector_state->max_requested_bpc !=
738 new_connector_state->max_requested_bpc)
739 new_crtc_state->connectors_changed = true;
740 }
741
742 if (funcs->atomic_check)
743 ret = funcs->atomic_check(connector, state);
744 if (ret) {
745 drm_dbg_atomic(dev,
746 "[CONNECTOR:%d:%s] driver check failed\n",
747 connector->base.id, connector->name);
748 return ret;
749 }
750
751 connectors_mask |= BIT(i);
752 }
753
754 /*
755 * After all the routing has been prepared we need to add in any
756 * connector which is itself unchanged, but whose CRTC changes its
757 * configuration. This must be done before calling mode_fixup in case a
758 * crtc only changed its mode but has the same set of connectors.
759 */
760 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
761 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
762 continue;
763
764 drm_dbg_atomic(dev,
765 "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
766 crtc->base.id, crtc->name,
767 new_crtc_state->enable ? 'y' : 'n',
768 new_crtc_state->active ? 'y' : 'n');
769
770 ret = drm_atomic_add_affected_connectors(state, crtc);
771 if (ret != 0)
772 return ret;
773
774 ret = drm_atomic_add_affected_planes(state, crtc);
775 if (ret != 0)
776 return ret;
777
778 ret = drm_atomic_check_valid_clones(state, crtc);
779 if (ret != 0)
780 return ret;
781 }
782
783 /*
784 * Iterate over all connectors again, to make sure atomic_check()
785 * has been called on them when a modeset is forced.
786 */
787 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
788 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
789
790 if (connectors_mask & BIT(i))
791 continue;
792
793 if (funcs->atomic_check)
794 ret = funcs->atomic_check(connector, state);
795 if (ret) {
796 drm_dbg_atomic(dev,
797 "[CONNECTOR:%d:%s] driver check failed\n",
798 connector->base.id, connector->name);
799 return ret;
800 }
801 }
802
803 /*
804 * Iterate over all connectors again, and add all affected bridges to
805 * the state.
806 */
807 for_each_oldnew_connector_in_state(state, connector,
808 old_connector_state,
809 new_connector_state, i) {
810 struct drm_encoder *encoder;
811
812 encoder = old_connector_state->best_encoder;
813 ret = drm_atomic_add_encoder_bridges(state, encoder);
814 if (ret)
815 return ret;
816
817 encoder = new_connector_state->best_encoder;
818 ret = drm_atomic_add_encoder_bridges(state, encoder);
819 if (ret)
820 return ret;
821 }
822
823 ret = mode_valid(state);
824 if (ret)
825 return ret;
826
827 return mode_fixup(state);
828 }
829 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
830
831 /**
832 * drm_atomic_helper_check_wb_connector_state() - Check writeback connector state
833 * @connector: corresponding connector
834 * @state: the driver state object
835 *
836 * Checks if the writeback connector state is valid, and returns an error if it
837 * isn't.
838 *
839 * RETURNS:
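 * As an illustration only (my_wb_connector_atomic_check() is a hypothetical
 * name), a writeback connector's &drm_connector_helper_funcs.atomic_check can
 * simply delegate to this helper:
 *
 *	static int my_wb_connector_atomic_check(struct drm_connector *connector,
 *						struct drm_atomic_state *state)
 *	{
 *		return drm_atomic_helper_check_wb_connector_state(connector, state);
 *	}
 *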
840 * Zero for success or -errno
841 */
842 int
843 drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
844 struct drm_atomic_state *state)
845 {
846 struct drm_connector_state *conn_state =
847 drm_atomic_get_new_connector_state(state, connector);
848 struct drm_writeback_job *wb_job = conn_state->writeback_job;
849 struct drm_property_blob *pixel_format_blob;
850 struct drm_framebuffer *fb;
851 size_t i, nformats;
852 u32 *formats;
853
854 if (!wb_job || !wb_job->fb)
855 return 0;
856
857 pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr;
858 nformats = pixel_format_blob->length / sizeof(u32);
859 formats = pixel_format_blob->data;
860 fb = wb_job->fb;
861
862 for (i = 0; i < nformats; i++)
863 if (fb->format->format == formats[i])
864 return 0;
865
866 drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
867
868 return -EINVAL;
869 }
870 EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state);
871
872 /**
873 * drm_atomic_helper_check_plane_state() - Check plane state for validity
874 * @plane_state: plane state to check
875 * @crtc_state: CRTC state to check
876 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
877 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
878 * @can_position: is it legal to position the plane such that it
879 * doesn't cover the entire CRTC? This will generally
880 * only be false for primary planes.
881 * @can_update_disabled: can the plane be updated while the CRTC
882 * is disabled?
883 *
884 * Checks that a desired plane update is valid, and updates various
885 * bits of derived state (clipped coordinates etc.). Drivers that provide
886 * their own plane handling rather than helper-provided implementations may
887 * still wish to call this function to avoid duplication of error checking
888 * code.
889 *
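 * As a minimal sketch only (my_plane_atomic_check() is a hypothetical name and
 * the DRM_PLANE_NO_SCALING limits simply assume a plane that cannot scale), a
 * typical &drm_plane_helper_funcs.atomic_check built on this helper looks
 * roughly like:
 *
 *	static int my_plane_atomic_check(struct drm_plane *plane,
 *					 struct drm_atomic_state *state)
 *	{
 *		struct drm_plane_state *new_state =
 *			drm_atomic_get_new_plane_state(state, plane);
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!new_state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_crtc_state(state, new_state->crtc);
 *		if (IS_ERR(crtc_state))
 *			return PTR_ERR(crtc_state);
 *
 *		return drm_atomic_helper_check_plane_state(new_state, crtc_state,
 *							   DRM_PLANE_NO_SCALING,
 *							   DRM_PLANE_NO_SCALING,
 *							   false, true);
 *	}
 *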
890 * RETURNS:
891 * Zero if update appears valid, error code on failure
892 */
893 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
894 const struct drm_crtc_state *crtc_state,
895 int min_scale,
896 int max_scale,
897 bool can_position,
898 bool can_update_disabled)
899 {
900 struct drm_framebuffer *fb = plane_state->fb;
901 struct drm_rect *src = &plane_state->src;
902 struct drm_rect *dst = &plane_state->dst;
903 unsigned int rotation = plane_state->rotation;
904 struct drm_rect clip = {};
905 int hscale, vscale;
906
907 WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
908
909 *src = drm_plane_state_src(plane_state);
910 *dst = drm_plane_state_dest(plane_state);
911
912 if (!fb) {
913 plane_state->visible = false;
914 return 0;
915 }
916
917 /* crtc should only be NULL when disabling (i.e., !fb) */
918 if (WARN_ON(!plane_state->crtc)) {
919 plane_state->visible = false;
920 return 0;
921 }
922
923 if (!crtc_state->enable && !can_update_disabled) {
924 drm_dbg_kms(plane_state->plane->dev,
925 "Cannot update plane of a disabled CRTC.\n");
926 return -EINVAL;
927 }
928
929 drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
930
931 /* Check scaling */
932 hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
933 vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
934 if (hscale < 0 || vscale < 0) {
935 drm_dbg_kms(plane_state->plane->dev,
936 "Invalid scaling of plane\n");
937 drm_rect_debug_print("src: ", &plane_state->src, true);
938 drm_rect_debug_print("dst: ", &plane_state->dst, false);
939 return -ERANGE;
940 }
941
942 if (crtc_state->enable)
943 drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
944
945 plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
946
947 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
948
949 if (!plane_state->visible)
950 /*
951 * Plane isn't visible; some drivers can handle this
952 * so we just return success here. Drivers that can't
953 * (including those that use the primary plane helper's
954 * update function) will return an error from their
955 * update_plane handler.
956 */
957 return 0;
958
959 if (!can_position && !drm_rect_equals(dst, &clip)) {
960 drm_dbg_kms(plane_state->plane->dev,
961 "Plane must cover entire CRTC\n");
962 drm_rect_debug_print("dst: ", dst, false);
963 drm_rect_debug_print("clip: ", &clip, false);
964 return -EINVAL;
965 }
966
967 return 0;
968 }
969 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
970
971 /**
972 * drm_atomic_helper_check_crtc_primary_plane() - Check CRTC state for primary plane
973 * @crtc_state: CRTC state to check
974 *
975 * Checks that a CRTC has at least one primary plane attached to it, which is
976 * a requirement on some hardware. Note that this only involves the CRTC side
977 * of the test. To test if the primary plane is visible or if it can be updated
978 * without the CRTC being enabled, use drm_atomic_helper_check_plane_state() in
979 * the plane's atomic check.
980 *
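 * As a minimal sketch only (my_crtc_atomic_check() is a hypothetical name),
 * this is typically called from a CRTC's &drm_crtc_helper_funcs.atomic_check:
 *
 *	static int my_crtc_atomic_check(struct drm_crtc *crtc,
 *					struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *crtc_state =
 *			drm_atomic_get_new_crtc_state(state, crtc);
 *
 *		return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
 *	}
 *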
981 * RETURNS:
982 * 0 if a primary plane is attached to the CRTC, or an error code otherwise
983 */
984 int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state)
985 {
986 struct drm_crtc *crtc = crtc_state->crtc;
987 struct drm_device *dev = crtc->dev;
988 struct drm_plane *plane;
989
990 /* needs at least one primary plane to be enabled */
991 drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
992 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
993 return 0;
994 }
995
996 drm_dbg_atomic(dev, "[CRTC:%d:%s] primary plane missing\n", crtc->base.id, crtc->name);
997
998 return -EINVAL;
999 }
1000 EXPORT_SYMBOL(drm_atomic_helper_check_crtc_primary_plane);
1001
1002 /**
1003 * drm_atomic_helper_check_planes - validate state object for planes changes
1004 * @dev: DRM device
1005 * @state: the driver state object
1006 *
1007 * Check the state object to see if the requested state is physically possible.
1008 * This does all the plane update related checks by calling into the
1009 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
1010 * hooks provided by the driver.
1011 *
1012 * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
1013 * updated planes.
1014 *
1015 * RETURNS:
1016 * Zero for success or -errno
1017 */
1018 int
1019 drm_atomic_helper_check_planes(struct drm_device *dev,
1020 struct drm_atomic_state *state)
1021 {
1022 struct drm_crtc *crtc;
1023 struct drm_crtc_state *new_crtc_state;
1024 struct drm_plane *plane;
1025 struct drm_plane_state *new_plane_state, *old_plane_state;
1026 int i, ret = 0;
1027
1028 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
1029 const struct drm_plane_helper_funcs *funcs;
1030
1031 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
1032
1033 funcs = plane->helper_private;
1034
1035 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
1036
1037 drm_atomic_helper_check_plane_damage(state, new_plane_state);
1038
1039 if (!funcs || !funcs->atomic_check)
1040 continue;
1041
1042 ret = funcs->atomic_check(plane, state);
1043 if (ret) {
1044 drm_dbg_atomic(plane->dev,
1045 "[PLANE:%d:%s] atomic driver check failed\n",
1046 plane->base.id, plane->name);
1047 return ret;
1048 }
1049 }
1050
1051 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1052 const struct drm_crtc_helper_funcs *funcs;
1053
1054 funcs = crtc->helper_private;
1055
1056 if (!funcs || !funcs->atomic_check)
1057 continue;
1058
1059 ret = funcs->atomic_check(crtc, state);
1060 if (ret) {
1061 drm_dbg_atomic(crtc->dev,
1062 "[CRTC:%d:%s] atomic driver check failed\n",
1063 crtc->base.id, crtc->name);
1064 return ret;
1065 }
1066 }
1067
1068 return ret;
1069 }
1070 EXPORT_SYMBOL(drm_atomic_helper_check_planes);
1071
1072 /**
1073 * drm_atomic_helper_check - validate state object
1074 * @dev: DRM device
1075 * @state: the driver state object
1076 *
1077 * Check the state object to see if the requested state is physically possible.
1078 * Only CRTCs and planes have check callbacks, so for any additional (global)
1079 * checking that a driver needs it can simply wrap that around this function.
1080 * Drivers without such needs can directly use this as their
1081 * &drm_mode_config_funcs.atomic_check callback.
1082 *
1083 * This just wraps the two parts of the state checking for planes and modeset
1084 * state in the default order: First it calls drm_atomic_helper_check_modeset()
1085 * and then drm_atomic_helper_check_planes(). The assumption is that the
1086 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
1087 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
1088 * watermarks.
1089 *
1090 * Note that zpos normalization will add all enabled planes to the state, which
1091 * might not be desired for some drivers.
1092 * For example, enabling or disabling a cursor plane which has a fixed zpos
1093 * value would force all other enabled planes into the state change.
1094 *
1095 * IMPORTANT:
1096 *
1097 * As this function calls drm_atomic_helper_check_modeset() internally, its
1098 * restrictions also apply:
1099 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
1100 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
1101 * without a full modeset) _must_ call drm_atomic_helper_check_modeset()
1102 * function again after that change.
1103 *
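 * As an illustration only (my_driver_atomic_check() and
 * my_driver_check_bandwidth() are hypothetical names), a driver that needs an
 * additional device-wide check can simply wrap this helper:
 *
 *	static int my_driver_atomic_check(struct drm_device *dev,
 *					  struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return my_driver_check_bandwidth(dev, state);
 *	}
 *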
1104 * RETURNS:
1105 * Zero for success or -errno
1106 */
1107 int drm_atomic_helper_check(struct drm_device *dev,
1108 struct drm_atomic_state *state)
1109 {
1110 int ret;
1111
1112 ret = drm_atomic_helper_check_modeset(dev, state);
1113 if (ret)
1114 return ret;
1115
1116 if (dev->mode_config.normalize_zpos) {
1117 ret = drm_atomic_normalize_zpos(dev, state);
1118 if (ret)
1119 return ret;
1120 }
1121
1122 ret = drm_atomic_helper_check_planes(dev, state);
1123 if (ret)
1124 return ret;
1125
1126 if (state->legacy_cursor_update)
1127 state->async_update = !drm_atomic_helper_async_check(dev, state);
1128
1129 drm_self_refresh_helper_alter_state(state);
1130
1131 return ret;
1132 }
1133 EXPORT_SYMBOL(drm_atomic_helper_check);
1134
1135 static bool
1136 crtc_needs_disable(struct drm_crtc_state *old_state,
1137 struct drm_crtc_state *new_state)
1138 {
1139 /*
1140 * No new_state means the CRTC is off, so the only criterion is whether
1141 * it's currently active or in self refresh mode.
1142 */
1143 if (!new_state)
1144 return drm_atomic_crtc_effectively_active(old_state);
1145
1146 /*
1147 * We need to disable bridge(s) and CRTC if we're transitioning out of
1148 * self-refresh and changing CRTCs at the same time, because the
1149 * bridge tracks self-refresh status via CRTC state.
1150 */
1151 if (old_state->self_refresh_active &&
1152 old_state->crtc != new_state->crtc)
1153 return true;
1154
1155 /*
1156 * We also need to run through the crtc_funcs->disable() function if
1157 * the CRTC is currently on, if it's transitioning to self refresh
1158 * mode, or if it's in self refresh mode and needs to be fully
1159 * disabled.
1160 */
1161 return old_state->active ||
1162 (old_state->self_refresh_active && !new_state->active) ||
1163 new_state->self_refresh_active;
1164 }
1165
1166 /**
1167 * drm_atomic_helper_commit_encoder_bridge_disable - disable bridges and encoder
1168 * @dev: DRM device
1169 * @state: the driver state object
1170 *
1171 * Loops over all connectors in the current state and if the CRTC needs
1172 * it, disables the full bridge chain and then disables the encoder.
1174 */
1175 void
1176 drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
1177 struct drm_atomic_state *state)
1178 {
1179 struct drm_connector *connector;
1180 struct drm_connector_state *old_conn_state, *new_conn_state;
1181 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1182 int i;
1183
1184 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
1185 const struct drm_encoder_helper_funcs *funcs;
1186 struct drm_encoder *encoder;
1187 struct drm_bridge *bridge;
1188
1189 /*
1190 * Shut down everything that's in the changeset and currently
1191 * still on. So need to check the old, saved state.
1192 */
1193 if (!old_conn_state->crtc)
1194 continue;
1195
1196 old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
1197
1198 if (new_conn_state->crtc)
1199 new_crtc_state = drm_atomic_get_new_crtc_state(
1200 state,
1201 new_conn_state->crtc);
1202 else
1203 new_crtc_state = NULL;
1204
1205 if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1206 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1207 continue;
1208
1209 encoder = old_conn_state->best_encoder;
1210
1211 /* We shouldn't get this far if we didn't previously have
1212 * an encoder.. but WARN_ON() rather than explode.
1213 */
1214 if (WARN_ON(!encoder))
1215 continue;
1216
1217 funcs = encoder->helper_private;
1218
1219 drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n",
1220 encoder->base.id, encoder->name);
1221
1222 /*
1223 * Each encoder has at most one connector (since we always steal
1224 * it away), so we won't call disable hooks twice.
1225 */
1226 bridge = drm_bridge_chain_get_first_bridge(encoder);
1227 drm_atomic_bridge_chain_disable(bridge, state);
1228 drm_bridge_put(bridge);
1229
1230 /* Right function depends upon target state. */
1231 if (funcs) {
1232 if (funcs->atomic_disable)
1233 funcs->atomic_disable(encoder, state);
1234 else if (new_conn_state->crtc && funcs->prepare)
1235 funcs->prepare(encoder);
1236 else if (funcs->disable)
1237 funcs->disable(encoder);
1238 else if (funcs->dpms)
1239 funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
1240 }
1241 }
1242 }
1243 EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_disable);
1244
1245 /**
1246 * drm_atomic_helper_commit_crtc_disable - disable CRTCs
1247 * @dev: DRM device
1248 * @state: the driver state object
1249 *
1250 * Loops over all CRTCs in the current state and if the CRTC needs
1251 * it, disables it.
1252 */
1253 void
1254 drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
1255 {
1256 struct drm_crtc *crtc;
1257 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1258 int i;
1259
1260 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1261 const struct drm_crtc_helper_funcs *funcs;
1262 int ret;
1263
1264 /* Shut down everything that needs a full modeset. */
1265 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1266 continue;
1267
1268 if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
1269 continue;
1270
1271 funcs = crtc->helper_private;
1272
1273 drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
1274 crtc->base.id, crtc->name);
1275
1276
1277 /* Right function depends upon target state. */
1278 if (new_crtc_state->enable && funcs->prepare)
1279 funcs->prepare(crtc);
1280 else if (funcs->atomic_disable)
1281 funcs->atomic_disable(crtc, state);
1282 else if (funcs->disable)
1283 funcs->disable(crtc);
1284 else if (funcs->dpms)
1285 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1286
1287 if (!drm_dev_has_vblank(dev))
1288 continue;
1289
1290 ret = drm_crtc_vblank_get(crtc);
1291 /*
1292 * Self-refresh is not a true "disable"; ensure vblank remains
1293 * enabled.
1294 */
1295 if (new_crtc_state->self_refresh_active)
1296 WARN_ONCE(ret != 0,
1297 "driver disabled vblank in self-refresh\n");
1298 else
1299 WARN_ONCE(ret != -EINVAL,
1300 "driver forgot to call drm_crtc_vblank_off()\n");
1301 if (ret == 0)
1302 drm_crtc_vblank_put(crtc);
1303 }
1304 }
1305 EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_disable);
1306
1307 /**
1308 * drm_atomic_helper_commit_encoder_bridge_post_disable - post-disable encoder bridges
1309 * @dev: DRM device
1310 * @state: the driver state object
1311 *
1312 * Loops over all connectors in the current state and if the CRTC needs
1313 * it, post-disables all encoder bridges.
1314 */
1315 void
1316 drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
1317 {
1318 struct drm_connector *connector;
1319 struct drm_connector_state *old_conn_state, *new_conn_state;
1320 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1321 int i;
1322
1323 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
1324 struct drm_encoder *encoder;
1325 struct drm_bridge *bridge;
1326
1327 /*
1328 * Shut down everything that's in the changeset and currently
1329 * still on. So need to check the old, saved state.
1330 */
1331 if (!old_conn_state->crtc)
1332 continue;
1333
1334 old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
1335
1336 if (new_conn_state->crtc)
1337 new_crtc_state = drm_atomic_get_new_crtc_state(state,
1338 new_conn_state->crtc);
1339 else
1340 new_crtc_state = NULL;
1341
1342 if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1343 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1344 continue;
1345
1346 encoder = old_conn_state->best_encoder;
1347
1348 /*
1349 * We shouldn't get this far if we didn't previously have
1350 * an encoder.. but WARN_ON() rather than explode.
1351 */
1352 if (WARN_ON(!encoder))
1353 continue;
1354
1355 drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n",
1356 encoder->base.id, encoder->name);
1357
1358 /*
1359 * Each encoder has at most one connector (since we always steal
1360 * it away), so we won't call disable hooks twice.
1361 */
1362 bridge = drm_bridge_chain_get_first_bridge(encoder);
1363 drm_atomic_bridge_chain_post_disable(bridge, state);
1364 drm_bridge_put(bridge);
1365 }
1366 }
1367 EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_post_disable);
1368
1369 static void
1370 disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
1371 {
1372 drm_atomic_helper_commit_encoder_bridge_disable(dev, state);
1373
1374 drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);
1375
1376 drm_atomic_helper_commit_crtc_disable(dev, state);
1377 }
1378
1379 /**
1380 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1381 * @dev: DRM device
1382 * @state: atomic state object being committed
1383 *
1384 * This function updates all the various legacy modeset state pointers in
1385 * connectors, encoders and CRTCs.
1386 *
1387 * Drivers can use this for building their own atomic commit if they don't have
1388 * a pure helper-based modeset implementation.
1389 *
1390 * Since these updates are not synchronized with lockings, only code paths
1391 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
1392 * legacy state filled out by this helper. De facto this means this helper and
1393 * the legacy state pointers are only really useful for transitioning an
1394 * existing driver to the atomic world.
1395 */
1396 void
1397 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1398 struct drm_atomic_state *state)
1399 {
1400 struct drm_connector *connector;
1401 struct drm_connector_state *old_conn_state, *new_conn_state;
1402 struct drm_crtc *crtc;
1403 struct drm_crtc_state *new_crtc_state;
1404 int i;
1405
1406 /* clear out existing links and update dpms */
1407 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
1408 if (connector->encoder) {
1409 WARN_ON(!connector->encoder->crtc);
1410
1411 connector->encoder->crtc = NULL;
1412 connector->encoder = NULL;
1413 }
1414
1415 crtc = new_conn_state->crtc;
1416 if ((!crtc && old_conn_state->crtc) ||
1417 (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1418 int mode = DRM_MODE_DPMS_OFF;
1419
1420 if (crtc && crtc->state->active)
1421 mode = DRM_MODE_DPMS_ON;
1422
1423 connector->dpms = mode;
1424 }
1425 }
1426
1427 /* set new links */
1428 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1429 if (!new_conn_state->crtc)
1430 continue;
1431
1432 if (WARN_ON(!new_conn_state->best_encoder))
1433 continue;
1434
1435 connector->encoder = new_conn_state->best_encoder;
1436 connector->encoder->crtc = new_conn_state->crtc;
1437 }
1438
1439 /* set legacy state in the crtc structure */
1440 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1441 struct drm_plane *primary = crtc->primary;
1442 struct drm_plane_state *new_plane_state;
1443
1444 crtc->mode = new_crtc_state->mode;
1445 crtc->enabled = new_crtc_state->enable;
1446
1447 new_plane_state =
1448 drm_atomic_get_new_plane_state(state, primary);
1449
1450 if (new_plane_state && new_plane_state->crtc == crtc) {
1451 crtc->x = new_plane_state->src_x >> 16;
1452 crtc->y = new_plane_state->src_y >> 16;
1453 }
1454 }
1455 }
1456 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1457
1458 /**
1459 * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
1460 * @state: atomic state object
1461 *
1462 * Updates the timestamping constants used for precise vblank timestamps
1463 * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
1464 */
1465 void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
1466 {
1467 struct drm_crtc_state *new_crtc_state;
1468 struct drm_crtc *crtc;
1469 int i;
1470
1471 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1472 if (new_crtc_state->enable)
1473 drm_calc_timestamping_constants(crtc,
1474 &new_crtc_state->adjusted_mode);
1475 }
1476 }
1477 EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
1478
1479 /**
1480 * drm_atomic_helper_commit_crtc_set_mode - set the new mode
1481 * @dev: DRM device
1482 * @state: the driver state object
1483 *
1484 * Loops over all CRTCs and connectors in the new state and, if the mode has
1485 * changed, sets the new mode on the CRTC, then calls down the bridge
1486 * chain to set the mode in all bridges as well.
1487 */
1488 void
1489 drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
1490 {
1491 struct drm_crtc *crtc;
1492 struct drm_crtc_state *new_crtc_state;
1493 struct drm_connector *connector;
1494 struct drm_connector_state *new_conn_state;
1495 int i;
1496
1497 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1498 const struct drm_crtc_helper_funcs *funcs;
1499
1500 if (!new_crtc_state->mode_changed)
1501 continue;
1502
1503 funcs = crtc->helper_private;
1504
1505 if (new_crtc_state->enable && funcs->mode_set_nofb) {
1506 drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n",
1507 crtc->base.id, crtc->name);
1508
1509 funcs->mode_set_nofb(crtc);
1510 }
1511 }
1512
1513 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1514 const struct drm_encoder_helper_funcs *funcs;
1515 struct drm_encoder *encoder;
1516 struct drm_display_mode *mode, *adjusted_mode;
1517 struct drm_bridge *bridge;
1518
1519 if (!new_conn_state->best_encoder)
1520 continue;
1521
1522 encoder = new_conn_state->best_encoder;
1523 funcs = encoder->helper_private;
1524 new_crtc_state = new_conn_state->crtc->state;
1525 mode = &new_crtc_state->mode;
1526 adjusted_mode = &new_crtc_state->adjusted_mode;
1527
1528 if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
1529 continue;
1530
1531 drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
1532 encoder->base.id, encoder->name);
1533
1534 /*
1535 * Each encoder has at most one connector (since we always steal
1536 * it away), so we won't call mode_set hooks twice.
1537 */
1538 if (funcs && funcs->atomic_mode_set) {
1539 funcs->atomic_mode_set(encoder, new_crtc_state,
1540 new_conn_state);
1541 } else if (funcs && funcs->mode_set) {
1542 funcs->mode_set(encoder, mode, adjusted_mode);
1543 }
1544
1545 bridge = drm_bridge_chain_get_first_bridge(encoder);
1546 drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
1547 drm_bridge_put(bridge);
1548 }
1549 }
1550 EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_set_mode);
1551
1552 /**
1553 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1554 * @dev: DRM device
1555 * @state: atomic state object being committed
1556 *
1557 * This function shuts down all the outputs that need to be shut down and
1558 * prepares them (if required) with the new mode.
1559 *
1560 * For compatibility with legacy CRTC helpers this should be called before
1561 * drm_atomic_helper_commit_planes(), which is what the default commit function
1562 * does. But drivers with different needs can group the modeset commits together
1563 * and do the plane commits at the end. This is useful for drivers doing runtime
1564 * PM since plane updates then only happen when the CRTC is actually enabled.
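 *
 * As a usage illustration (the my_commit_tail() name is hypothetical), a
 * driver-provided &drm_mode_config_helper_funcs.atomic_commit_tail that
 * mirrors the ordering of the stock drm_atomic_helper_commit_tail() looks
 * roughly like:
 *
 *	static void my_commit_tail(struct drm_atomic_state *state)
 *	{
 *		struct drm_device *dev = state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, state);
 *		drm_atomic_helper_commit_planes(dev, state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, state);
 *		drm_atomic_helper_fake_vblank(state);
 *		drm_atomic_helper_commit_hw_done(state);
 *		drm_atomic_helper_wait_for_vblanks(dev, state);
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *	}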
1565 */
1566 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
1567 struct drm_atomic_state *state)
1568 {
1569 disable_outputs(dev, state);
1570
1571 drm_atomic_helper_update_legacy_modeset_state(dev, state);
1572 drm_atomic_helper_calc_timestamping_constants(state);
1573
1574 drm_atomic_helper_commit_crtc_set_mode(dev, state);
1575 }
1576 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1577
1578 /**
1579 * drm_atomic_helper_commit_writebacks - issue writebacks
1580 * @dev: DRM device
1581 * @state: atomic state object being committed
1582 *
1583 * This loops over the connectors, checks if the new state requires
1584 * a writeback job to be issued and in that case issues an atomic
1585 * commit on each connector.
1586 */
1587 void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1588 struct drm_atomic_state *state)
1589 {
1590 struct drm_connector *connector;
1591 struct drm_connector_state *new_conn_state;
1592 int i;
1593
1594 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1595 const struct drm_connector_helper_funcs *funcs;
1596
1597 funcs = connector->helper_private;
1598 if (!funcs->atomic_commit)
1599 continue;
1600
1601 if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1602 WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1603 funcs->atomic_commit(connector, state);
1604 }
1605 }
1606 }
1607 EXPORT_SYMBOL(drm_atomic_helper_commit_writebacks);
1608
1609 /**
1610 * drm_atomic_helper_commit_encoder_bridge_pre_enable - pre-enable bridges
1611 * @dev: DRM device
1612 * @state: atomic state object being committed
1613 *
1614 * This loops over the connectors and if the CRTC needs it, pre-enables
1615 * the entire bridge chain.
1616 */
1617 void
1618 drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
1619 {
1620 struct drm_connector *connector;
1621 struct drm_connector_state *new_conn_state;
1622 int i;
1623
1624 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1625 struct drm_encoder *encoder;
1626 struct drm_bridge *bridge;
1627
1628 if (!new_conn_state->best_encoder)
1629 continue;
1630
1631 if (!new_conn_state->crtc->state->active ||
1632 !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1633 continue;
1634
1635 encoder = new_conn_state->best_encoder;
1636
1637 drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n",
1638 encoder->base.id, encoder->name);
1639
1640 /*
1641 * Each encoder has at most one connector (since we always steal
1642 * it away), so we won't call enable hooks twice.
1643 */
1644 bridge = drm_bridge_chain_get_first_bridge(encoder);
1645 drm_atomic_bridge_chain_pre_enable(bridge, state);
1646 drm_bridge_put(bridge);
1647 }
1648 }
1649 EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_pre_enable);
1650
1651 /**
1652 * drm_atomic_helper_commit_crtc_enable - enables the CRTCs
1653 * @dev: DRM device
1654 * @state: atomic state object being committed
1655 *
1656 * This loops over the CRTCs in the new state and, if the CRTC needs it,
1657 * enables it.
1658 */
1659 void
1660 drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
1661 {
1662 struct drm_crtc *crtc;
1663 struct drm_crtc_state *old_crtc_state;
1664 struct drm_crtc_state *new_crtc_state;
1665 int i;
1666
1667 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1668 const struct drm_crtc_helper_funcs *funcs;
1669
1670 /* Need to filter out CRTCs where only planes change. */
1671 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1672 continue;
1673
1674 if (!new_crtc_state->active)
1675 continue;
1676
1677 funcs = crtc->helper_private;
1678
1679 if (new_crtc_state->enable) {
1680 drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
1681 crtc->base.id, crtc->name);
1682 if (funcs->atomic_enable)
1683 funcs->atomic_enable(crtc, state);
1684 else if (funcs->commit)
1685 funcs->commit(crtc);
1686 }
1687 }
1688 }
1689 EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_enable);
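
/*
 * Example (illustrative sketch, hypothetical foo_ driver): the
 * &drm_crtc_helper_funcs.atomic_enable hook that the loop above prefers over
 * the legacy commit callback:
 *
 *	static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
 *					   struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *new_crtc_state =
 *			drm_atomic_get_new_crtc_state(state, crtc);
 *
 *		// Program timings from new_crtc_state->adjusted_mode and
 *		// start scanout, then turn vblank handling back on.
 *		drm_crtc_vblank_on(crtc);
 *	}
 *
 *	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
 *		.atomic_enable = foo_crtc_atomic_enable,
 *	};
 */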
1690
1691 /**
1692 * drm_atomic_helper_commit_encoder_bridge_enable - enables the bridges
1693 * @dev: DRM device
1694 * @state: atomic state object being committed
1695 *
1696 * This loops over all connectors in the new state and, if the CRTC needs it,
1697 * enables the entire bridge chain.
1698 */
1699 void
1700 drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
1701 {
1702 struct drm_connector *connector;
1703 struct drm_connector_state *new_conn_state;
1704 int i;
1705
1706 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1707 const struct drm_encoder_helper_funcs *funcs;
1708 struct drm_encoder *encoder;
1709 struct drm_bridge *bridge;
1710
1711 if (!new_conn_state->best_encoder)
1712 continue;
1713
1714 if (!new_conn_state->crtc->state->active ||
1715 !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1716 continue;
1717
1718 encoder = new_conn_state->best_encoder;
1719 funcs = encoder->helper_private;
1720
1721 drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n",
1722 encoder->base.id, encoder->name);
1723
1724 /*
1725 * Each encoder has at most one connector (since we always steal
1726 * it away), so we won't call enable hooks twice.
1727 */
1728 bridge = drm_bridge_chain_get_first_bridge(encoder);
1729
1730 if (funcs) {
1731 if (funcs->atomic_enable)
1732 funcs->atomic_enable(encoder, state);
1733 else if (funcs->enable)
1734 funcs->enable(encoder);
1735 else if (funcs->commit)
1736 funcs->commit(encoder);
1737 }
1738
1739 drm_atomic_bridge_chain_enable(bridge, state);
1740 drm_bridge_put(bridge);
1741 }
1742 }
1743 EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_enable);
1744
1745 /**
1746 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1747 * @dev: DRM device
1748 * @state: atomic state object being committed
1749 *
1750 * This function enables all the outputs with the new configuration which had to
1751 * be turned off for the update.
1752 *
1753 * For compatibility with legacy CRTC helpers this should be called after
1754 * drm_atomic_helper_commit_planes(), which is what the default commit function
1755 * does. But drivers with different needs can group the modeset commits together
1756 * and do the plane commits at the end. This is useful for drivers doing runtime
1757 * PM since plane updates then only happen when the CRTC is actually enabled.
1758 */
1759 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1760 struct drm_atomic_state *state)
1761 {
1762 drm_atomic_helper_commit_crtc_enable(dev, state);
1763
1764 drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
1765
1766 drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
1767
1768 drm_atomic_helper_commit_writebacks(dev, state);
1769 }
1770 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1771
1772 /*
1773 * For atomic updates which touch just a single CRTC, calculate the time of the
1774 * next vblank, and inform all the fences of the deadline.
1775 */
1776 static void set_fence_deadline(struct drm_device *dev,
1777 struct drm_atomic_state *state)
1778 {
1779 struct drm_crtc *crtc;
1780 struct drm_crtc_state *new_crtc_state;
1781 struct drm_plane *plane;
1782 struct drm_plane_state *new_plane_state;
1783 ktime_t vbltime = 0;
1784 int i;
1785
1786 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
1787 ktime_t v;
1788
1789 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
1790 continue;
1791
1792 if (!new_crtc_state->active)
1793 continue;
1794
1795 if (drm_crtc_next_vblank_start(crtc, &v))
1796 continue;
1797
1798 if (!vbltime || ktime_before(v, vbltime))
1799 vbltime = v;
1800 }
1801
1802 /* If no CRTCs updated, then nothing to do: */
1803 if (!vbltime)
1804 return;
1805
1806 for_each_new_plane_in_state (state, plane, new_plane_state, i) {
1807 if (!new_plane_state->fence)
1808 continue;
1809 dma_fence_set_deadline(new_plane_state->fence, vbltime);
1810 }
1811 }
1812
1813 /**
1814 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1815 * @dev: DRM device
1816 * @state: atomic state object with old state structures
1817 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1818 * Otherwise @state is the old state.
1819 *
1820 * For implicit sync, drivers should fish the exclusive fence out of the
1821 * incoming fb's and stash it in the drm_plane_state. This is called after
1822 * drm_atomic_helper_swap_state(), so it uses the current plane state (and
1823 * just uses the atomic state to find the changed planes).
1824 *
1825 * Note that @pre_swap is needed since the point where we block for fences moves
1826 * around depending upon whether an atomic commit is blocking or
1827 * non-blocking. For non-blocking commit all waiting needs to happen after
1828 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1829 * to wait **before** we do anything that can't be easily rolled back. That is
1830 * before we call drm_atomic_helper_swap_state().
1831 *
1832 * Returns zero on success or < 0 if dma_fence_wait() fails.
1833 */
1834 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1835 struct drm_atomic_state *state,
1836 bool pre_swap)
1837 {
1838 struct drm_plane *plane;
1839 struct drm_plane_state *new_plane_state;
1840 int i, ret;
1841
1842 set_fence_deadline(dev, state);
1843
1844 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1845 if (!new_plane_state->fence)
1846 continue;
1847
1848 WARN_ON(!new_plane_state->fb);
1849
1850 /*
1851 * If waiting for fences pre-swap (ie: nonblock), userspace can
1852 * still interrupt the operation. Instead of blocking until the
1853 * timer expires, make the wait interruptible.
1854 */
1855 ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1856 if (ret)
1857 return ret;
1858
1859 dma_fence_put(new_plane_state->fence);
1860 new_plane_state->fence = NULL;
1861 }
1862
1863 return 0;
1864 }
1865 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
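
/*
 * Example (illustrative sketch): for GEM-based drivers the fence stashing
 * described above is typically just a call to
 * drm_gem_plane_helper_prepare_fb(), wired up through
 * &drm_plane_helper_funcs.prepare_fb (the foo_ name is a placeholder):
 *
 *	static int foo_plane_prepare_fb(struct drm_plane *plane,
 *					struct drm_plane_state *new_state)
 *	{
 *		// Fish the implicit fences out of new_state->fb and stash
 *		// them in new_state->fence for the wait below.
 *		return drm_gem_plane_helper_prepare_fb(plane, new_state);
 *	}
 */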
1866
1867 /**
1868 * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
1869 * @dev: DRM device
1870 * @state: atomic state object being committed
1871 *
1872 * Helper to, after atomic commit, wait for vblanks on all affected
1873 * CRTCs (ie. before cleaning up old framebuffers using
1874 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1875 * framebuffers have actually changed to optimize for the legacy cursor and
1876 * plane update use-case.
1877 *
1878 * Drivers using the nonblocking commit tracking support initialized by calling
1879 * drm_atomic_helper_setup_commit() should look at
1880 * drm_atomic_helper_wait_for_flip_done() as an alternative.
1881 */
1882 void
1883 drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1884 struct drm_atomic_state *state)
1885 {
1886 struct drm_crtc *crtc;
1887 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1888 int i, ret;
1889 unsigned int crtc_mask = 0;
1890
1891 /*
1892 * Legacy cursor ioctls are completely unsynced, and userspace
1893 * relies on that (by doing tons of cursor updates).
1894 */
1895 if (state->legacy_cursor_update)
1896 return;
1897
1898 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1899 if (!new_crtc_state->active)
1900 continue;
1901
1902 ret = drm_crtc_vblank_get(crtc);
1903 if (ret != 0)
1904 continue;
1905
1906 crtc_mask |= drm_crtc_mask(crtc);
1907 state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1908 }
1909
1910 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
1911 wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc);
1912
1913 if (!(crtc_mask & drm_crtc_mask(crtc)))
1914 continue;
1915
1916 ret = wait_event_timeout(*queue,
1917 state->crtcs[i].last_vblank_count !=
1918 drm_crtc_vblank_count(crtc),
1919 msecs_to_jiffies(100));
1920
1921 WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1922 crtc->base.id, crtc->name);
1923
1924 drm_crtc_vblank_put(crtc);
1925 }
1926 }
1927 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1928
1929 /**
1930 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1931 * @dev: DRM device
1932 * @state: atomic state object being committed
1933 *
1934 * Helper to, after atomic commit, wait for page flips on all affected
1935 * crtcs (ie. before cleaning up old framebuffers using
1936 * drm_atomic_helper_cleanup_planes()). Compared to
1937 * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
1938 * CRTCs, assuming that cursor-only updates are signalling their completion
1939 * immediately (or using a different path).
1940 *
1941 * This requires that drivers use the nonblocking commit tracking support
1942 * initialized using drm_atomic_helper_setup_commit().
1943 */
1944 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1945 struct drm_atomic_state *state)
1946 {
1947 struct drm_crtc *crtc;
1948 int i;
1949
1950 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1951 struct drm_crtc_commit *commit = state->crtcs[i].commit;
1952 int ret;
1953
1954 crtc = state->crtcs[i].ptr;
1955
1956 if (!crtc || !commit)
1957 continue;
1958
1959 ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1960 if (ret == 0)
1961 drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n",
1962 crtc->base.id, crtc->name);
1963 }
1964
1965 if (state->fake_commit)
1966 complete_all(&state->fake_commit->flip_done);
1967 }
1968 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1969
1970 /**
1971 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1972 * @state: atomic state object being committed
1973 *
1974 * This is the default implementation for the
1975 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1976 * that do not support runtime_pm or do not need the CRTC to be
1977 * enabled to perform a commit. Otherwise, see
1978 * drm_atomic_helper_commit_tail_rpm().
1979 *
1980 * Note that the default ordering of how the various stages are called is
1981 * chosen to match the legacy modeset helper library as closely as possible.
1982 */
1983 void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
1984 {
1985 struct drm_device *dev = state->dev;
1986
1987 drm_atomic_helper_commit_modeset_disables(dev, state);
1988
1989 drm_atomic_helper_commit_planes(dev, state, 0);
1990
1991 drm_atomic_helper_commit_modeset_enables(dev, state);
1992
1993 drm_atomic_helper_fake_vblank(state);
1994
1995 drm_atomic_helper_commit_hw_done(state);
1996
1997 drm_atomic_helper_wait_for_vblanks(dev, state);
1998
1999 drm_atomic_helper_cleanup_planes(dev, state);
2000 }
2001 EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
2002
2003 /**
2004 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
2005 * @state: new modeset state to be committed
2006 *
2007 * This is an alternative implementation for the
2008 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
2009 * that support runtime_pm or need the CRTC to be enabled to perform a
2010 * commit. Otherwise, one should use the default implementation
2011 * drm_atomic_helper_commit_tail().
2012 */
2013 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state)
2014 {
2015 struct drm_device *dev = state->dev;
2016
2017 drm_atomic_helper_commit_modeset_disables(dev, state);
2018
2019 drm_atomic_helper_commit_modeset_enables(dev, state);
2020
2021 drm_atomic_helper_commit_planes(dev, state,
2022 DRM_PLANE_COMMIT_ACTIVE_ONLY);
2023
2024 drm_atomic_helper_fake_vblank(state);
2025
2026 drm_atomic_helper_commit_hw_done(state);
2027
2028 drm_atomic_helper_wait_for_vblanks(dev, state);
2029
2030 drm_atomic_helper_cleanup_planes(dev, state);
2031 }
2032 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
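
/*
 * Example (illustrative sketch, hypothetical foo_ driver): a third common
 * variant waits for flip completion instead of vblanks, which requires the
 * commit tracking set up by drm_atomic_helper_setup_commit():
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *state)
 *	{
 *		struct drm_device *dev = state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, state);
 *		drm_atomic_helper_commit_planes(dev, state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, state);
 *
 *		drm_atomic_helper_fake_vblank(state);
 *		drm_atomic_helper_commit_hw_done(state);
 *
 *		drm_atomic_helper_wait_for_flip_done(dev, state);
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *	}
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = foo_atomic_commit_tail,
 *	};
 */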
2033
2034 static void commit_tail(struct drm_atomic_state *state)
2035 {
2036 struct drm_device *dev = state->dev;
2037 const struct drm_mode_config_helper_funcs *funcs;
2038 struct drm_crtc_state *new_crtc_state;
2039 struct drm_crtc *crtc;
2040 ktime_t start;
2041 s64 commit_time_ms;
2042 unsigned int i, new_self_refresh_mask = 0;
2043
2044 funcs = dev->mode_config.helper_private;
2045
2046 /*
2047 * We're measuring the _entire_ commit, so the time will vary depending
2048 * on how many fences and objects are involved. For the purposes of self
2049 * refresh, this is desirable since it'll give us an idea of how
2050 * congested things are. This will inform our decision on how often we
2051 * should enter self refresh after idle.
2052 *
2053 * These times will be averaged out in the self refresh helpers to avoid
2054 * overreacting over one outlier frame
2055 */
2056 start = ktime_get();
2057
2058 drm_atomic_helper_wait_for_fences(dev, state, false);
2059
2060 drm_atomic_helper_wait_for_dependencies(state);
2061
2062 /*
2063 * We cannot safely access new_crtc_state after
2064 * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
2065 * self-refresh active beforehand:
2066 */
2067 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
2068 if (new_crtc_state->self_refresh_active)
2069 new_self_refresh_mask |= BIT(i);
2070
2071 if (funcs && funcs->atomic_commit_tail)
2072 funcs->atomic_commit_tail(state);
2073 else
2074 drm_atomic_helper_commit_tail(state);
2075
2076 commit_time_ms = ktime_ms_delta(ktime_get(), start);
2077 if (commit_time_ms > 0)
2078 drm_self_refresh_helper_update_avg_times(state,
2079 (unsigned long)commit_time_ms,
2080 new_self_refresh_mask);
2081
2082 drm_atomic_helper_commit_cleanup_done(state);
2083
2084 drm_atomic_state_put(state);
2085 }
2086
2087 static void commit_work(struct work_struct *work)
2088 {
2089 struct drm_atomic_state *state = container_of(work,
2090 struct drm_atomic_state,
2091 commit_work);
2092 commit_tail(state);
2093 }
2094
2095 /**
2096 * drm_atomic_helper_async_check - check if state can be committed asynchronously
2097 * @dev: DRM device
2098 * @state: the driver state object
2099 *
2100 * This helper will check if it is possible to commit the state asynchronously.
2101 * Async commits are not supposed to swap the states like normal sync commits
2102 * but just do in-place changes on the current state.
2103 *
2104 * It will return 0 if the commit can happen in an asynchronous fashion, or a
2105 * negative error code if not. Note that an error just means the state can't be
2106 * committed asynchronously; such a commit should be treated like a normal synchronous commit.
2107 */
2108 int drm_atomic_helper_async_check(struct drm_device *dev,
2109 struct drm_atomic_state *state)
2110 {
2111 struct drm_crtc *crtc;
2112 struct drm_crtc_state *crtc_state;
2113 struct drm_plane *plane = NULL;
2114 struct drm_plane_state *old_plane_state = NULL;
2115 struct drm_plane_state *new_plane_state = NULL;
2116 const struct drm_plane_helper_funcs *funcs;
2117 int i, ret, n_planes = 0;
2118
2119 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2120 if (drm_atomic_crtc_needs_modeset(crtc_state))
2121 return -EINVAL;
2122 }
2123
2124 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
2125 n_planes++;
2126
2127 /* FIXME: we support only single plane updates for now */
2128 if (n_planes != 1) {
2129 drm_dbg_atomic(dev,
2130 "only single plane async updates are supported\n");
2131 return -EINVAL;
2132 }
2133
2134 if (!new_plane_state->crtc ||
2135 old_plane_state->crtc != new_plane_state->crtc) {
2136 drm_dbg_atomic(dev,
2137 "[PLANE:%d:%s] async update cannot change CRTC\n",
2138 plane->base.id, plane->name);
2139 return -EINVAL;
2140 }
2141
2142 funcs = plane->helper_private;
2143 if (!funcs->atomic_async_update) {
2144 drm_dbg_atomic(dev,
2145 "[PLANE:%d:%s] driver does not support async updates\n",
2146 plane->base.id, plane->name);
2147 return -EINVAL;
2148 }
2149
2150 if (new_plane_state->fence) {
2151 drm_dbg_atomic(dev,
2152 "[PLANE:%d:%s] missing fence for async update\n",
2153 plane->base.id, plane->name);
2154 return -EINVAL;
2155 }
2156
2157 /*
2158 * Don't do an async update if there is an outstanding commit modifying
2159 * the plane. This prevents our async update's changes from getting
2160 * overridden by a previous synchronous update's state.
2161 */
2162 if (old_plane_state->commit &&
2163 !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
2164 drm_dbg_atomic(dev,
2165 "[PLANE:%d:%s] inflight previous commit preventing async commit\n",
2166 plane->base.id, plane->name);
2167 return -EBUSY;
2168 }
2169
2170 ret = funcs->atomic_async_check(plane, state, false);
2171 if (ret != 0)
2172 drm_dbg_atomic(dev,
2173 "[PLANE:%d:%s] driver async check failed\n",
2174 plane->base.id, plane->name);
2175 return ret;
2176 }
2177 EXPORT_SYMBOL(drm_atomic_helper_async_check);
2178
2179 /**
2180 * drm_atomic_helper_async_commit - commit state asynchronously
2181 * @dev: DRM device
2182 * @state: the driver state object
2183 *
2184 * This function commits a state asynchronously, i.e., not vblank
2185 * synchronized. It should be used on a state only when
2186 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
2187 * the states like normal sync commits, but just do in-place changes on the
2188 * current state.
2189 *
2190 * TODO: Implement full swap instead of doing in-place changes.
2191 */
2192 void drm_atomic_helper_async_commit(struct drm_device *dev,
2193 struct drm_atomic_state *state)
2194 {
2195 struct drm_plane *plane;
2196 struct drm_plane_state *plane_state;
2197 const struct drm_plane_helper_funcs *funcs;
2198 int i;
2199
2200 for_each_new_plane_in_state(state, plane, plane_state, i) {
2201 struct drm_framebuffer *new_fb = plane_state->fb;
2202 struct drm_framebuffer *old_fb = plane->state->fb;
2203
2204 funcs = plane->helper_private;
2205 funcs->atomic_async_update(plane, state);
2206
2207 /*
2208 * ->atomic_async_update() is supposed to update the
2209 * plane->state in-place, make sure at least common
2210 * properties have been properly updated.
2211 */
2212 WARN_ON_ONCE(plane->state->fb != new_fb);
2213 WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
2214 WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
2215 WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
2216 WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
2217
2218 /*
2219 * Make sure the FBs have been swapped so that cleanups in the
2220 * new_state perform a cleanup of the old FB.
2221 */
2222 WARN_ON_ONCE(plane_state->fb != old_fb);
2223 }
2224 }
2225 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
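
/*
 * Example (illustrative sketch, hypothetical foo_ driver, assuming the
 * three-argument atomic_async_check prototype used above): plane hooks that
 * satisfy the in-place update contract checked by
 * drm_atomic_helper_async_commit():
 *
 *	static int foo_plane_atomic_async_check(struct drm_plane *plane,
 *						struct drm_atomic_state *state,
 *						bool flip)
 *	{
 *		// Reject anything the hardware cannot latch without a full
 *		// flip, e.g. scaling or format changes.
 *		return 0;
 *	}
 *
 *	static void foo_plane_atomic_async_update(struct drm_plane *plane,
 *						  struct drm_atomic_state *state)
 *	{
 *		struct drm_plane_state *new_state =
 *			drm_atomic_get_new_plane_state(state, plane);
 *
 *		// Update plane->state in place and swap the FB pointers so
 *		// that the WARNs above hold and the old FB gets cleaned up
 *		// through new_state.
 *		swap(plane->state->fb, new_state->fb);
 *		plane->state->crtc_x = new_state->crtc_x;
 *		plane->state->crtc_y = new_state->crtc_y;
 *		plane->state->src_x = new_state->src_x;
 *		plane->state->src_y = new_state->src_y;
 *
 *		// ... program the hardware from plane->state ...
 *	}
 */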
2226
2227 /**
2228 * drm_atomic_helper_commit - commit validated state object
2229 * @dev: DRM device
2230 * @state: the driver state object
2231 * @nonblock: whether nonblocking behavior is requested.
2232 *
2233 * This function commits a state object that has been pre-validated with
2234 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
2235 * reservation fails. This function implements nonblocking commits, using
2236 * drm_atomic_helper_setup_commit() and related functions.
2237 *
2238 * Committing the actual hardware state is done through the
2239 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
2240 * implementation drm_atomic_helper_commit_tail().
2241 *
2242 * RETURNS:
2243 * Zero for success or -errno.
2244 */
2245 int drm_atomic_helper_commit(struct drm_device *dev,
2246 struct drm_atomic_state *state,
2247 bool nonblock)
2248 {
2249 int ret;
2250
2251 if (state->async_update) {
2252 ret = drm_atomic_helper_prepare_planes(dev, state);
2253 if (ret)
2254 return ret;
2255
2256 drm_atomic_helper_async_commit(dev, state);
2257 drm_atomic_helper_unprepare_planes(dev, state);
2258
2259 return 0;
2260 }
2261
2262 ret = drm_atomic_helper_setup_commit(state, nonblock);
2263 if (ret)
2264 return ret;
2265
2266 INIT_WORK(&state->commit_work, commit_work);
2267
2268 ret = drm_atomic_helper_prepare_planes(dev, state);
2269 if (ret)
2270 return ret;
2271
2272 if (!nonblock) {
2273 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
2274 if (ret)
2275 goto err;
2276 }
2277
2278 /*
2279 * This is the point of no return - everything below never fails except
2280 * when the hw goes bonghits. Which means we can commit the new state on
2281 * the software side now.
2282 */
2283
2284 ret = drm_atomic_helper_swap_state(state, true);
2285 if (ret)
2286 goto err;
2287
2288 /*
2289 * Everything below can be run asynchronously without the need to grab
2290 * any modeset locks at all under one condition: It must be guaranteed
2291 * that the asynchronous work has either been cancelled (if the driver
2292 * supports it, which at least requires that the framebuffers get
2293 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
2294 * before the new state gets committed on the software side with
2295 * drm_atomic_helper_swap_state().
2296 *
2297 * This scheme allows new atomic state updates to be prepared and
2298 * checked in parallel to the asynchronous completion of the previous
2299 * update. Which is important since compositors need to figure out the
2300 * composition of the next frame right after having submitted the
2301 * current layout.
2302 *
2303 * NOTE: Commit work has multiple phases, first hardware commit, then
2304 * cleanup. We want them to overlap, hence need system_unbound_wq to
2305 * make sure work items don't artificially stall on one another.
2306 */
2307
2308 drm_atomic_state_get(state);
2309 if (nonblock)
2310 queue_work(system_unbound_wq, &state->commit_work);
2311 else
2312 commit_tail(state);
2313
2314 return 0;
2315
2316 err:
2317 drm_atomic_helper_unprepare_planes(dev, state);
2318 return ret;
2319 }
2320 EXPORT_SYMBOL(drm_atomic_helper_commit);
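
/*
 * Example (illustrative sketch): drivers that can use this default
 * implementation directly wire it up in their &drm_mode_config_funcs (the
 * foo_ name is a placeholder):
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */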
2321
2322 /**
2323 * DOC: implementing nonblocking commit
2324 *
2325 * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
2326 * different operations against each another. Locks, especially struct
2327 * &drm_modeset_lock, should not be held in worker threads or any other
2328 * asynchronous context used to commit the hardware state.
2329 *
2330 * drm_atomic_helper_commit() implements the recommended sequence for
2331 * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
2332 *
2333 * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
2334 * need to propagate out of memory/VRAM errors to userspace, it must be called
2335 * synchronously.
2336 *
2337 * 2. Synchronize with any outstanding nonblocking commit worker threads which
2338 * might be affected by the new state update. This is handled by
2339 * drm_atomic_helper_setup_commit().
2340 *
2341 * Asynchronous workers need to have sufficient parallelism to be able to run
2342 * different atomic commits on different CRTCs in parallel. The simplest way to
2343 * achieve this is by running them on the &system_unbound_wq work queue. Note
2344 * that drivers are not required to split up atomic commits and run an
2345 * individual commit in parallel - userspace is supposed to do that if it cares.
2346 * But it might be beneficial to do that for modesets, since those necessarily
2347 * must be done as one global operation, and enabling or disabling a CRTC can
2348 * take a long time. But even that is not required.
2349 *
2350 * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
2351 * against all CRTCs therein. Therefore for atomic state updates which only flip
2352 * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
2353 * in its atomic check code: This would prevent committing of atomic updates to
2354 * multiple CRTCs in parallel. In general, adding additional state structures
2355 * should be avoided as much as possible, because this reduces parallelism in
2356 * (nonblocking) commits, both due to locking and due to commit sequencing
2357 * requirements.
2358 *
2359 * 3. The software state is updated synchronously with
2360 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
2361 * locks means concurrent callers never see inconsistent state. Note that commit
2362 * workers do not hold any locks; their access is only coordinated through
2363 * ordering. If workers would access state only through the pointers in the
2364 * free-standing state objects (currently not the case for any driver) then even
2365 * multiple pending commits could be in-flight at the same time.
2366 *
2367 * 4. Schedule a work item to do all subsequent steps, using the split-out
2368 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
2369 * then cleaning up the framebuffers after the old framebuffer is no longer
2370 * being displayed. The scheduled work should synchronize against other workers
2371 * using the &drm_crtc_commit infrastructure as needed. See
2372 * drm_atomic_helper_setup_commit() for more details.
2373 */
2374
2375 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
2376 {
2377 struct drm_crtc_commit *commit, *stall_commit = NULL;
2378 bool completed = true;
2379 int i;
2380 long ret = 0;
2381
2382 spin_lock(&crtc->commit_lock);
2383 i = 0;
2384 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
2385 if (i == 0) {
2386 completed = try_wait_for_completion(&commit->flip_done);
2387 /*
2388 * Userspace is not allowed to get ahead of the previous
2389 * commit with nonblocking ones.
2390 */
2391 if (!completed && nonblock) {
2392 spin_unlock(&crtc->commit_lock);
2393 drm_dbg_atomic(crtc->dev,
2394 "[CRTC:%d:%s] busy with a previous commit\n",
2395 crtc->base.id, crtc->name);
2396
2397 return -EBUSY;
2398 }
2399 } else if (i == 1) {
2400 stall_commit = drm_crtc_commit_get(commit);
2401 break;
2402 }
2403
2404 i++;
2405 }
2406 spin_unlock(&crtc->commit_lock);
2407
2408 if (!stall_commit)
2409 return 0;
2410
2411 /* We don't want to let commits get ahead of cleanup work too much,
2412 * stalling on 2nd previous commit means triple-buffer won't ever stall.
2413 */
2414 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
2415 10*HZ);
2416 if (ret == 0)
2417 drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n",
2418 crtc->base.id, crtc->name);
2419
2420 drm_crtc_commit_put(stall_commit);
2421
2422 return ret < 0 ? ret : 0;
2423 }
2424
2425 static void release_crtc_commit(struct completion *completion)
2426 {
2427 struct drm_crtc_commit *commit = container_of(completion,
2428 typeof(*commit),
2429 flip_done);
2430
2431 drm_crtc_commit_put(commit);
2432 }
2433
2434 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
2435 {
2436 init_completion(&commit->flip_done);
2437 init_completion(&commit->hw_done);
2438 init_completion(&commit->cleanup_done);
2439 INIT_LIST_HEAD(&commit->commit_entry);
2440 kref_init(&commit->ref);
2441 commit->crtc = crtc;
2442 }
2443
2444 static struct drm_crtc_commit *
2445 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
2446 {
2447 if (crtc) {
2448 struct drm_crtc_state *new_crtc_state;
2449
2450 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
2451
2452 return new_crtc_state->commit;
2453 }
2454
2455 if (!state->fake_commit) {
2456 state->fake_commit = kzalloc_obj(*state->fake_commit,
2457 GFP_KERNEL);
2458 if (!state->fake_commit)
2459 return NULL;
2460
2461 init_commit(state->fake_commit, NULL);
2462 }
2463
2464 return state->fake_commit;
2465 }
2466
2467 /**
2468 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
2469 * @state: new modeset state to be committed
2470 * @nonblock: whether nonblocking behavior is requested.
2471 *
2472 * This function prepares @state to be used by the atomic helper's support for
2473 * nonblocking commits. Drivers using the nonblocking commit infrastructure
2474 * should always call this function from their
2475 * &drm_mode_config_funcs.atomic_commit hook.
2476 *
2477 * Drivers that need to extend the commit setup to private objects can use the
2478 * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
2479 *
2480 * To be able to use this support drivers need to use a few more helper
2481 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
2482 * actually committing the hardware state, and for nonblocking commits this call
2483 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
2484 * and its stall parameter, for when a driver's commit hooks look at the
2485 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
2486 *
2487 * Completion of the hardware commit step must be signalled using
2488 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
2489 * to read or change any permanent software or hardware modeset state. The only
2490 * exception is state protected by other means than &drm_modeset_lock locks.
2491 * Only the free standing @state with pointers to the old state structures can
2492 * be inspected, e.g. to clean up old buffers using
2493 * drm_atomic_helper_cleanup_planes().
2494 *
2495 * At the very end, before cleaning up @state drivers must call
2496 * drm_atomic_helper_commit_cleanup_done().
2497 *
2498 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
2499 * complete and easy-to-use default implementation of the atomic_commit() hook.
2500 *
2501 * The tracking of asynchronously executed and still pending commits is done
2502 * using the core structure &drm_crtc_commit.
2503 *
2504 * By default there's no need to clean up resources allocated by this function
2505 * explicitly: drm_atomic_state_default_clear() will take care of that
2506 * automatically.
2507 *
2508 * Returns:
2509 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
2510 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
2511 */
2512 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
2513 bool nonblock)
2514 {
2515 struct drm_crtc *crtc;
2516 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2517 struct drm_connector *conn;
2518 struct drm_connector_state *old_conn_state, *new_conn_state;
2519 struct drm_plane *plane;
2520 struct drm_plane_state *old_plane_state, *new_plane_state;
2521 struct drm_crtc_commit *commit;
2522 const struct drm_mode_config_helper_funcs *funcs;
2523 int i, ret;
2524
2525 funcs = state->dev->mode_config.helper_private;
2526
2527 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2528 commit = kzalloc_obj(*commit, GFP_KERNEL);
2529 if (!commit)
2530 return -ENOMEM;
2531
2532 init_commit(commit, crtc);
2533
2534 new_crtc_state->commit = commit;
2535
2536 ret = stall_checks(crtc, nonblock);
2537 if (ret)
2538 return ret;
2539
2540 /*
2541 * Drivers only send out events when at least either current or
2542 * new CRTC state is active. Complete right away if everything
2543 * stays off.
2544 */
2545 if (!old_crtc_state->active && !new_crtc_state->active) {
2546 complete_all(&commit->flip_done);
2547 continue;
2548 }
2549
2550 /* Legacy cursor updates are fully unsynced. */
2551 if (state->legacy_cursor_update) {
2552 complete_all(&commit->flip_done);
2553 continue;
2554 }
2555
2556 if (!new_crtc_state->event) {
2557 commit->event = kzalloc_obj(*commit->event, GFP_KERNEL);
2558 if (!commit->event)
2559 return -ENOMEM;
2560
2561 new_crtc_state->event = commit->event;
2562 }
2563
2564 new_crtc_state->event->base.completion = &commit->flip_done;
2565 new_crtc_state->event->base.completion_release = release_crtc_commit;
2566 drm_crtc_commit_get(commit);
2567
2568 commit->abort_completion = true;
2569
2570 state->crtcs[i].commit = commit;
2571 drm_crtc_commit_get(commit);
2572 }
2573
2574 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
2575 /*
2576 * Userspace is not allowed to get ahead of the previous
2577 * commit with nonblocking ones.
2578 */
2579 if (nonblock && old_conn_state->commit &&
2580 !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
2581 drm_dbg_atomic(conn->dev,
2582 "[CONNECTOR:%d:%s] busy with a previous commit\n",
2583 conn->base.id, conn->name);
2584
2585 return -EBUSY;
2586 }
2587
2588 /* Always track connectors explicitly for e.g. link retraining. */
2589 commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
2590 if (!commit)
2591 return -ENOMEM;
2592
2593 new_conn_state->commit = drm_crtc_commit_get(commit);
2594 }
2595
2596 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2597 /*
2598 * Userspace is not allowed to get ahead of the previous
2599 * commit with nonblocking ones.
2600 */
2601 if (nonblock && old_plane_state->commit &&
2602 !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
2603 drm_dbg_atomic(plane->dev,
2604 "[PLANE:%d:%s] busy with a previous commit\n",
2605 plane->base.id, plane->name);
2606
2607 return -EBUSY;
2608 }
2609
2610 /* Always track planes explicitly for async pageflip support. */
2611 commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2612 if (!commit)
2613 return -ENOMEM;
2614
2615 new_plane_state->commit = drm_crtc_commit_get(commit);
2616 }
2617
2618 if (funcs && funcs->atomic_commit_setup)
2619 return funcs->atomic_commit_setup(state);
2620
2621 return 0;
2622 }
2623 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
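
/*
 * Example (illustrative sketch, hypothetical foo_ driver): the
 * &drm_mode_config_helper_funcs.atomic_commit_setup extension point mentioned
 * above lets drivers attach their own tracking to the commit while the
 * modeset locks are still held; foo_attach_private_tracking() is a
 * placeholder for such driver code:
 *
 *	static int foo_atomic_commit_setup(struct drm_atomic_state *state)
 *	{
 *		return foo_attach_private_tracking(state);
 *	}
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_setup = foo_atomic_commit_setup,
 *	};
 */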
2624
2625 /**
2626 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
2627 * @state: atomic state object being committed
2628 *
2629 * This function waits for all preceding commits that touch the same CRTC as
2630 * @state to both be committed to the hardware (as signalled by
2631 * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
2632 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2633 *
2634 * This is part of the atomic helper support for nonblocking commits, see
2635 * drm_atomic_helper_setup_commit() for an overview.
2636 */
2637 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
2638 {
2639 struct drm_crtc *crtc;
2640 struct drm_crtc_state *old_crtc_state;
2641 struct drm_plane *plane;
2642 struct drm_plane_state *old_plane_state;
2643 struct drm_connector *conn;
2644 struct drm_connector_state *old_conn_state;
2645 int i;
2646 long ret;
2647
2648 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2649 ret = drm_crtc_commit_wait(old_crtc_state->commit);
2650 if (ret)
2651 drm_err(crtc->dev,
2652 "[CRTC:%d:%s] commit wait timed out\n",
2653 crtc->base.id, crtc->name);
2654 }
2655
2656 for_each_old_connector_in_state(state, conn, old_conn_state, i) {
2657 ret = drm_crtc_commit_wait(old_conn_state->commit);
2658 if (ret)
2659 drm_err(conn->dev,
2660 "[CONNECTOR:%d:%s] commit wait timed out\n",
2661 conn->base.id, conn->name);
2662 }
2663
2664 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2665 ret = drm_crtc_commit_wait(old_plane_state->commit);
2666 if (ret)
2667 drm_err(plane->dev,
2668 "[PLANE:%d:%s] commit wait timed out\n",
2669 plane->base.id, plane->name);
2670 }
2671 }
2672 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2673
2674 /**
2675 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2676 * @state: atomic state object being committed
2677 *
2678 * This function walks all CRTCs and fakes VBLANK events on those with
2679 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2680 * The primary use of this function is writeback connectors working in oneshot
2681 * mode and faking VBLANK events. In this case they only fake the VBLANK event
2682 * when a job is queued, and any change to the pipeline that does not touch the
2683 * connector would lead to timeouts when calling
2684 * drm_atomic_helper_wait_for_vblanks() or
2685 * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
2686 * connectors, this function can also fake VBLANK events for CRTCs without
2687 * VBLANK interrupt.
2688 *
2689 * This is part of the atomic helper support for nonblocking commits, see
2690 * drm_atomic_helper_setup_commit() for an overview.
2691 */
2692 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state)
2693 {
2694 struct drm_crtc_state *new_crtc_state;
2695 struct drm_crtc *crtc;
2696 int i;
2697
2698 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2699 unsigned long flags;
2700
2701 if (!new_crtc_state->no_vblank)
2702 continue;
2703
2704 spin_lock_irqsave(&state->dev->event_lock, flags);
2705 if (new_crtc_state->event) {
2706 drm_crtc_send_vblank_event(crtc,
2707 new_crtc_state->event);
2708 new_crtc_state->event = NULL;
2709 }
2710 spin_unlock_irqrestore(&state->dev->event_lock, flags);
2711 }
2712 }
2713 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2714
2715 /**
2716 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
2717 * @state: atomic state object being committed
2718 *
2719 * This function is used to signal completion of the hardware commit step. After
2720 * this step the driver is not allowed to read or change any permanent software
2721 * or hardware modeset state. The only exception is state protected by other
2722 * means than &drm_modeset_lock locks.
2723 *
2724 * Drivers should try to postpone any expensive or delayed cleanup work after
2725 * this function is called.
2726 *
2727 * This is part of the atomic helper support for nonblocking commits, see
2728 * drm_atomic_helper_setup_commit() for an overview.
2729 */
2730 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
2731 {
2732 struct drm_crtc *crtc;
2733 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2734 struct drm_crtc_commit *commit;
2735 int i;
2736
2737 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2738 commit = new_crtc_state->commit;
2739 if (!commit)
2740 continue;
2741
2742 /*
2743 * copy new_crtc_state->commit to old_crtc_state->commit,
2744 * it's unsafe to touch new_crtc_state after hw_done,
2745 * but we still need to do so in cleanup_done().
2746 */
2747 if (old_crtc_state->commit)
2748 drm_crtc_commit_put(old_crtc_state->commit);
2749
2750 old_crtc_state->commit = drm_crtc_commit_get(commit);
2751
2752 /* backend must have consumed any event by now */
2753 WARN_ON(new_crtc_state->event);
2754 complete_all(&commit->hw_done);
2755 }
2756
2757 if (state->fake_commit) {
2758 complete_all(&state->fake_commit->hw_done);
2759 complete_all(&state->fake_commit->flip_done);
2760 }
2761 }
2762 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2763
2764 /**
2765 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2766 * @state: atomic state object being committed
2767 *
2768 * This signals completion of the atomic update @state, including any
2769 * cleanup work. If used, it must be called right before calling
2770 * drm_atomic_state_put().
2771 *
2772 * This is part of the atomic helper support for nonblocking commits, see
2773 * drm_atomic_helper_setup_commit() for an overview.
2774 */
2775 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
2776 {
2777 struct drm_crtc *crtc;
2778 struct drm_crtc_state *old_crtc_state;
2779 struct drm_crtc_commit *commit;
2780 int i;
2781
2782 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2783 commit = old_crtc_state->commit;
2784 if (WARN_ON(!commit))
2785 continue;
2786
2787 complete_all(&commit->cleanup_done);
2788 WARN_ON(!try_wait_for_completion(&commit->hw_done));
2789
2790 spin_lock(&crtc->commit_lock);
2791 list_del(&commit->commit_entry);
2792 spin_unlock(&crtc->commit_lock);
2793 }
2794
2795 if (state->fake_commit) {
2796 complete_all(&state->fake_commit->cleanup_done);
2797 WARN_ON(!try_wait_for_completion(&state->fake_commit->hw_done));
2798 }
2799 }
2800 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2801
2802 /**
2803 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2804 * @dev: DRM device
2805 * @state: atomic state object with new state structures
2806 *
2807 * This function prepares plane state, specifically framebuffers, for the new
2808 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2809 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2810 * any already successfully prepared framebuffer.
2811 *
2812 * Returns:
2813 * 0 on success, negative error code on failure.
2814 */
2815 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2816 struct drm_atomic_state *state)
2817 {
2818 struct drm_connector *connector;
2819 struct drm_connector_state *new_conn_state;
2820 struct drm_plane *plane;
2821 struct drm_plane_state *new_plane_state;
2822 int ret, i, j;
2823
2824 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2825 if (!new_conn_state->writeback_job)
2826 continue;
2827
2828 ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
2829 if (ret < 0)
2830 return ret;
2831 }
2832
2833 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2834 const struct drm_plane_helper_funcs *funcs;
2835
2836 funcs = plane->helper_private;
2837
2838 if (funcs->prepare_fb) {
2839 ret = funcs->prepare_fb(plane, new_plane_state);
2840 if (ret)
2841 goto fail_prepare_fb;
2842 } else {
2843 WARN_ON_ONCE(funcs->cleanup_fb);
2844
2845 if (!drm_core_check_feature(dev, DRIVER_GEM))
2846 continue;
2847
2848 ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
2849 if (ret)
2850 goto fail_prepare_fb;
2851 }
2852 }
2853
2854 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2855 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2856
2857 if (funcs->begin_fb_access) {
2858 ret = funcs->begin_fb_access(plane, new_plane_state);
2859 if (ret)
2860 goto fail_begin_fb_access;
2861 }
2862 }
2863
2864 return 0;
2865
2866 fail_begin_fb_access:
2867 for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2868 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2869
2870 if (j >= i)
2871 continue;
2872
2873 if (funcs->end_fb_access)
2874 funcs->end_fb_access(plane, new_plane_state);
2875 }
2876 i = j; /* set i to upper limit to cleanup all planes */
2877 fail_prepare_fb:
2878 for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2879 const struct drm_plane_helper_funcs *funcs;
2880
2881 if (j >= i)
2882 continue;
2883
2884 funcs = plane->helper_private;
2885
2886 if (funcs->cleanup_fb)
2887 funcs->cleanup_fb(plane, new_plane_state);
2888 }
2889
2890 return ret;
2891 }
2892 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2893
2894 /**
2895 * drm_atomic_helper_unprepare_planes - release plane resources on aborts
2896 * @dev: DRM device
2897 * @state: atomic state object with old state structures
2898 *
2899 * This function cleans up plane state, specifically framebuffers, from the
2900 * atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
2901 * when aborting an atomic commit. For cleaning up after a successful commit
2902 * use drm_atomic_helper_cleanup_planes().
2903 */
2904 void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
2905 struct drm_atomic_state *state)
2906 {
2907 struct drm_plane *plane;
2908 struct drm_plane_state *new_plane_state;
2909 int i;
2910
2911 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2912 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2913
2914 if (funcs->end_fb_access)
2915 funcs->end_fb_access(plane, new_plane_state);
2916 }
2917
2918 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2919 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2920
2921 if (funcs->cleanup_fb)
2922 funcs->cleanup_fb(plane, new_plane_state);
2923 }
2924 }
2925 EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
2926
2927 static bool plane_crtc_active(const struct drm_plane_state *state)
2928 {
2929 return state->crtc && state->crtc->state->active;
2930 }
2931
2932 /**
2933 * drm_atomic_helper_commit_planes - commit plane state
2934 * @dev: DRM device
2935 * @state: atomic state object being committed
2936 * @flags: flags for committing plane state
2937 *
2938 * This function commits the new plane state using the plane and atomic helper
2939 * functions for planes and CRTCs. It assumes that the atomic state has already
2940 * been pushed into the relevant object state pointers, since this step can no
2941 * longer fail.
2942 *
2943 * It still requires the global state object @state to know which planes and
2944 * crtcs need to be updated though.
2945 *
2946 * Note that this function does all plane updates across all CRTCs in one step.
2947 * If the hardware can't support this approach look at
2948 * drm_atomic_helper_commit_planes_on_crtc() instead.
2949 *
2950 * Plane parameters can be updated by applications while the associated CRTC is
2951 * disabled. The DRM/KMS core will store the parameters in the plane state,
2952 * which will be available to the driver when the CRTC is turned on. As a result
2953 * most drivers don't need to be immediately notified of plane updates for a
2954 * disabled CRTC.
2955 *
2956 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2957 * @flags in order not to receive plane update notifications related to a
2958 * disabled CRTC. This avoids the need to manually ignore plane updates in
2959 * driver code when the driver and/or hardware can't or just don't need to deal
2960 * with updates on disabled CRTCs, for example when supporting runtime PM.
2961 *
2962 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2963 * display controllers require a CRTC's planes to be disabled when the CRTC is
2964 * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
2965 * call for a plane if the CRTC of the old plane state needs a modesetting
2966 * operation. Of course, the drivers need to disable the planes in their CRTC
2967 * disable callbacks since no one else would do that.
2968 *
2969 * The drm_atomic_helper_commit() default implementation doesn't set the
2970 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2971 * This should not be copied blindly by drivers.
2972 */
2973 void drm_atomic_helper_commit_planes(struct drm_device *dev,
2974 struct drm_atomic_state *state,
2975 uint32_t flags)
2976 {
2977 struct drm_crtc *crtc;
2978 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2979 struct drm_plane *plane;
2980 struct drm_plane_state *old_plane_state, *new_plane_state;
2981 int i;
2982 bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2983 bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2984
2985 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2986 const struct drm_crtc_helper_funcs *funcs;
2987
2988 funcs = crtc->helper_private;
2989
2990 if (!funcs || !funcs->atomic_begin)
2991 continue;
2992
2993 if (active_only && !new_crtc_state->active)
2994 continue;
2995
2996 funcs->atomic_begin(crtc, state);
2997 }
2998
2999 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
3000 const struct drm_plane_helper_funcs *funcs;
3001 bool disabling;
3002
3003 funcs = plane->helper_private;
3004
3005 if (!funcs)
3006 continue;
3007
3008 disabling = drm_atomic_plane_disabling(old_plane_state,
3009 new_plane_state);
3010
3011 if (active_only) {
3012 /*
3013 * Skip planes related to inactive CRTCs. If the plane
3014 * is enabled use the state of the current CRTC. If the
3015 * plane is being disabled use the state of the old
3016 * CRTC to avoid skipping planes being disabled on an
3017 * active CRTC.
3018 */
3019 if (!disabling && !plane_crtc_active(new_plane_state))
3020 continue;
3021 if (disabling && !plane_crtc_active(old_plane_state))
3022 continue;
3023 }
3024
3025 /*
3026 * Special-case disabling the plane if drivers support it.
3027 */
3028 if (disabling && funcs->atomic_disable) {
3029 struct drm_crtc_state *crtc_state;
3030
3031 crtc_state = old_plane_state->crtc->state;
3032
3033 if (drm_atomic_crtc_needs_modeset(crtc_state) &&
3034 no_disable)
3035 continue;
3036
3037 funcs->atomic_disable(plane, state);
3038 } else if (new_plane_state->crtc || disabling) {
3039 funcs->atomic_update(plane, state);
3040
3041 if (!disabling && funcs->atomic_enable) {
3042 if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
3043 funcs->atomic_enable(plane, state);
3044 }
3045 }
3046 }
3047
3048 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3049 const struct drm_crtc_helper_funcs *funcs;
3050
3051 funcs = crtc->helper_private;
3052
3053 if (!funcs || !funcs->atomic_flush)
3054 continue;
3055
3056 if (active_only && !new_crtc_state->active)
3057 continue;
3058
3059 funcs->atomic_flush(crtc, state);
3060 }
3061
3062 /*
3063 * Signal end of framebuffer access here before hw_done. After hw_done,
3064 * a later commit might have already released the plane state.
3065 */
3066 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3067 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
3068
3069 if (funcs->end_fb_access)
3070 funcs->end_fb_access(plane, old_plane_state);
3071 }
3072 }
3073 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
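
/*
 * Example (illustrative sketch, hypothetical foo_ driver): the per-plane
 * hooks this function dispatches to, bracketed by the CRTC's atomic_begin
 * and atomic_flush:
 *
 *	static void foo_plane_atomic_update(struct drm_plane *plane,
 *					    struct drm_atomic_state *state)
 *	{
 *		struct drm_plane_state *new_state =
 *			drm_atomic_get_new_plane_state(state, plane);
 *
 *		// Program scanout address, position and size from new_state.
 *	}
 *
 *	static void foo_plane_atomic_disable(struct drm_plane *plane,
 *					     struct drm_atomic_state *state)
 *	{
 *		// Turn the plane off in hardware; state may be NULL when
 *		// called through drm_atomic_helper_disable_planes_on_crtc().
 *	}
 *
 *	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
 *		.atomic_update = foo_plane_atomic_update,
 *		.atomic_disable = foo_plane_atomic_disable,
 *	};
 */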
3074
3075 /**
3076 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
3077 * @old_crtc_state: atomic state object with the old CRTC state
3078 *
3079 * This function commits the new plane state using the plane and atomic helper
3080 * functions for planes on the specific CRTC. It assumes that the atomic state
3081 * has already been pushed into the relevant object state pointers, since this
3082 * step can no longer fail.
3083 *
3084 * This function is useful when plane updates should be done CRTC-by-CRTC
3085 * instead of one global step like drm_atomic_helper_commit_planes() does.
3086 *
3087 * This function can only be safely used when planes are not allowed to move
3088 * between different CRTCs, because it doesn't handle inter-CRTC
3089 * dependencies. Callers need to ensure that either no such dependencies exist,
3090 * or resolve them through ordering of commit calls or through some other means.
3091 */
3092 void
3093 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
3094 {
3095 const struct drm_crtc_helper_funcs *crtc_funcs;
3096 struct drm_crtc *crtc = old_crtc_state->crtc;
3097 struct drm_atomic_state *old_state = old_crtc_state->state;
3098 struct drm_crtc_state *new_crtc_state =
3099 drm_atomic_get_new_crtc_state(old_state, crtc);
3100 struct drm_plane *plane;
3101 unsigned int plane_mask;
3102
3103 plane_mask = old_crtc_state->plane_mask;
3104 plane_mask |= new_crtc_state->plane_mask;
3105
3106 crtc_funcs = crtc->helper_private;
3107 if (crtc_funcs && crtc_funcs->atomic_begin)
3108 crtc_funcs->atomic_begin(crtc, old_state);
3109
3110 drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
3111 struct drm_plane_state *old_plane_state =
3112 drm_atomic_get_old_plane_state(old_state, plane);
3113 struct drm_plane_state *new_plane_state =
3114 drm_atomic_get_new_plane_state(old_state, plane);
3115 const struct drm_plane_helper_funcs *plane_funcs;
3116 bool disabling;
3117
3118 plane_funcs = plane->helper_private;
3119
3120 if (!old_plane_state || !plane_funcs)
3121 continue;
3122
3123 WARN_ON(new_plane_state->crtc &&
3124 new_plane_state->crtc != crtc);
3125
3126 disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
3127
3128 if (disabling && plane_funcs->atomic_disable) {
3129 plane_funcs->atomic_disable(plane, old_state);
3130 } else if (new_plane_state->crtc || disabling) {
3131 plane_funcs->atomic_update(plane, old_state);
3132
3133 if (!disabling && plane_funcs->atomic_enable) {
3134 if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
3135 plane_funcs->atomic_enable(plane, old_state);
3136 }
3137 }
3138 }
3139
3140 if (crtc_funcs && crtc_funcs->atomic_flush)
3141 crtc_funcs->atomic_flush(crtc, old_state);
3142 }
3143 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
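
/*
 * Example (illustrative sketch, hypothetical driver, fragment only): a
 * driver-specific commit tail can use this per-CRTC variant instead of the
 * global drm_atomic_helper_commit_planes():
 *
 *	static void foo_commit_tail_per_crtc(struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *old_crtc_state;
 *		struct drm_crtc *crtc;
 *		int i;
 *
 *		// ... modeset disables and per-CRTC setup ...
 *
 *		for_each_old_crtc_in_state(state, crtc, old_crtc_state, i)
 *			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 *
 *		// ... modeset enables, hw_done, waits and cleanup ...
 *	}
 */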
3144
3145 /**
3146 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
3147 * @old_crtc_state: atomic state object with the old CRTC state
3148 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
3149 *
3150 * Disables all planes associated with the given CRTC. This can be
3151 * used for instance in the CRTC helper atomic_disable callback to disable
3152 * all planes.
3153 *
3154 * If the @atomic parameter is set, the function calls the CRTC's
3155 * atomic_begin hook before disabling the planes and its atomic_flush hook
3156 * afterwards.
3157 *
3158 * It is a bug to call this function without having implemented the
3159 * &drm_plane_helper_funcs.atomic_disable plane hook.
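 *
 * A minimal sketch of a CRTC's &drm_crtc_helper_funcs.atomic_disable hook
 * using this helper (mydrv_crtc_power_off() is a hypothetical driver
 * function)::
 *
 *	static void mydrv_crtc_atomic_disable(struct drm_crtc *crtc,
 *					      struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *old_crtc_state =
 *			drm_atomic_get_old_crtc_state(state, crtc);
 *
 *		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 *		mydrv_crtc_power_off(crtc);
 *	}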
3160 */
3161 void
3162 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
3163 bool atomic)
3164 {
3165 struct drm_crtc *crtc = old_crtc_state->crtc;
3166 const struct drm_crtc_helper_funcs *crtc_funcs =
3167 crtc->helper_private;
3168 struct drm_plane *plane;
3169
3170 if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
3171 crtc_funcs->atomic_begin(crtc, NULL);
3172
3173 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
3174 const struct drm_plane_helper_funcs *plane_funcs =
3175 plane->helper_private;
3176
3177 if (!plane_funcs)
3178 continue;
3179
3180 WARN_ON(!plane_funcs->atomic_disable);
3181 if (plane_funcs->atomic_disable)
3182 plane_funcs->atomic_disable(plane, NULL);
3183 }
3184
3185 if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
3186 crtc_funcs->atomic_flush(crtc, NULL);
3187 }
3188 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
3189
3190 /**
3191 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
3192 * @dev: DRM device
3193 * @state: atomic state object being committed
3194 *
3195 * This function cleans up plane state, specifically framebuffers, from the old
3196 * configuration. Hence the old configuration must be preserved in @state to
3197 * be able to call this function.
3198 *
3199 * This function may not be called on the new state when the atomic update
3200 * fails at any point after calling drm_atomic_helper_prepare_planes(). Use
3201 * drm_atomic_helper_unprepare_planes() in this case.
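 *
 * In a typical &drm_mode_config_helper_funcs.atomic_commit_tail implementation
 * this is the final step; the success path roughly looks like (sketch only)::
 *
 *	drm_atomic_helper_commit_modeset_disables(dev, state);
 *	drm_atomic_helper_commit_planes(dev, state, 0);
 *	drm_atomic_helper_commit_modeset_enables(dev, state);
 *	drm_atomic_helper_commit_hw_done(state);
 *	drm_atomic_helper_wait_for_vblanks(dev, state);
 *	drm_atomic_helper_cleanup_planes(dev, state);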
3202 */
3203 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
3204 struct drm_atomic_state *state)
3205 {
3206 struct drm_plane *plane;
3207 struct drm_plane_state *old_plane_state;
3208 int i;
3209
3210 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3211 const struct drm_plane_helper_funcs *funcs = plane->helper_private;
3212
3213 if (funcs->cleanup_fb)
3214 funcs->cleanup_fb(plane, old_plane_state);
3215 }
3216 }
3217 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
3218
3219 /**
3220 * drm_atomic_helper_swap_state - store atomic state into current sw state
3221 * @state: atomic state
3222 * @stall: stall for preceding commits
3223 *
3224 * This function stores the atomic state into the current state pointers in all
3225 * driver objects. It should be called after all potentially failing steps have
3226 * been done and have succeeded, but before the actual hardware state is committed.
3227 *
3228 * For cleanup and error recovery the current state for all changed objects will
3229 * be swapped into @state.
3230 *
3231 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
3232 *
3233 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
3234 *
3235 * 2. Do any other steps that might fail.
3236 *
3237 * 3. Put the staged state into the current state pointers with this function.
3238 *
3239 * 4. Actually commit the hardware state.
3240 *
3241 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
3242 * contains the old state. Also do any other cleanup required with that state.
3243 *
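 * Expressed as code, that sequence might look roughly as follows
 * (mydrv_commit_tail() is a hypothetical driver function that programs the
 * hardware)::
 *
 *	ret = drm_atomic_helper_prepare_planes(dev, state);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_atomic_helper_swap_state(state, true);
 *	if (ret) {
 *		drm_atomic_helper_unprepare_planes(dev, state);
 *		return ret;
 *	}
 *
 *	mydrv_commit_tail(state);
 *
 *	drm_atomic_helper_cleanup_planes(dev, state);
 *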
3244 * @stall must be set when nonblocking commits for this driver directly access
3245 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
3246 * the current atomic helpers this is almost always the case, since the helpers
3247 * don't pass the right state structures to the callbacks.
3248 *
3249 * Returns:
3250 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
3251 * wait for the preceding commits has been interrupted.
3252 */
3253 int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
3254 bool stall)
3255 {
3256 int i, ret;
3257 unsigned long flags = 0;
3258 struct drm_connector *connector;
3259 struct drm_connector_state *old_conn_state, *new_conn_state;
3260 struct drm_crtc *crtc;
3261 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
3262 struct drm_plane *plane;
3263 struct drm_plane_state *old_plane_state, *new_plane_state;
3264 struct drm_colorop *colorop;
3265 struct drm_colorop_state *old_colorop_state, *new_colorop_state;
3266 struct drm_crtc_commit *commit;
3267 struct drm_private_obj *obj;
3268 struct drm_private_state *old_obj_state, *new_obj_state;
3269
3270 if (stall) {
3271 /*
3272 * We have to stall for hw_done here before
3273 * drm_atomic_helper_wait_for_dependencies() because flip
3274 * depth > 1 is not yet supported by all drivers. As long as
3275 * obj->state is directly dereferenced anywhere in the drivers
3276 * atomic_commit_tail function, then it's unsafe to swap state
3277 * before drm_atomic_helper_commit_hw_done() is called.
3278 */
3279
3280 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
3281 commit = old_crtc_state->commit;
3282
3283 if (!commit)
3284 continue;
3285
3286 ret = wait_for_completion_interruptible(&commit->hw_done);
3287 if (ret)
3288 return ret;
3289 }
3290
3291 for_each_old_connector_in_state(state, connector, old_conn_state, i) {
3292 commit = old_conn_state->commit;
3293
3294 if (!commit)
3295 continue;
3296
3297 ret = wait_for_completion_interruptible(&commit->hw_done);
3298 if (ret)
3299 return ret;
3300 }
3301
3302 for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3303 commit = old_plane_state->commit;
3304
3305 if (!commit)
3306 continue;
3307
3308 ret = wait_for_completion_interruptible(&commit->hw_done);
3309 if (ret)
3310 return ret;
3311 }
3312 }
3313
3314 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
3315 WARN_ON(connector->state != old_conn_state);
3316
3317 old_conn_state->state = state;
3318 new_conn_state->state = NULL;
3319
3320 state->connectors[i].state_to_destroy = old_conn_state;
3321 connector->state = new_conn_state;
3322 }
3323
3324 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3325 WARN_ON(crtc->state != old_crtc_state);
3326
3327 old_crtc_state->state = state;
3328 new_crtc_state->state = NULL;
3329
3330 state->crtcs[i].state_to_destroy = old_crtc_state;
3331 crtc->state = new_crtc_state;
3332
3333 if (new_crtc_state->commit) {
3334 spin_lock(&crtc->commit_lock);
3335 list_add(&new_crtc_state->commit->commit_entry,
3336 &crtc->commit_list);
3337 spin_unlock(&crtc->commit_lock);
3338
3339 new_crtc_state->commit->event = NULL;
3340 }
3341 }
3342
3343 for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) {
3344 WARN_ON(colorop->state != old_colorop_state);
3345
3346 old_colorop_state->state = state;
3347 new_colorop_state->state = NULL;
3348
3349 state->colorops[i].state = old_colorop_state;
3350 colorop->state = new_colorop_state;
3351 }
3352
3353 drm_panic_lock(state->dev, flags);
3354 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
3355 WARN_ON(plane->state != old_plane_state);
3356
3357 old_plane_state->state = state;
3358 new_plane_state->state = NULL;
3359
3360 state->planes[i].state_to_destroy = old_plane_state;
3361 plane->state = new_plane_state;
3362 }
3363 drm_panic_unlock(state->dev, flags);
3364
3365 for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
3366 WARN_ON(obj->state != old_obj_state);
3367
3368 old_obj_state->state = state;
3369 new_obj_state->state = NULL;
3370
3371 state->private_objs[i].state_to_destroy = old_obj_state;
3372 obj->state = new_obj_state;
3373 }
3374
3375 return 0;
3376 }
3377 EXPORT_SYMBOL(drm_atomic_helper_swap_state);
3378
3379 /**
3380 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
3381 * @plane: plane object to update
3382 * @crtc: owning CRTC of the plane
3383 * @fb: framebuffer to flip onto plane
3384 * @crtc_x: x offset of primary plane on @crtc
3385 * @crtc_y: y offset of primary plane on @crtc
3386 * @crtc_w: width of primary plane rectangle on @crtc
3387 * @crtc_h: height of primary plane rectangle on @crtc
3388 * @src_x: x offset of @fb for panning
3389 * @src_y: y offset of @fb for panning
3390 * @src_w: width of source rectangle in @fb
3391 * @src_h: height of source rectangle in @fb
3392 * @ctx: lock acquire context
3393 *
3394 * Provides a default plane update handler using the atomic driver interface.
3395 *
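 * A driver typically wires this (together with
 * drm_atomic_helper_disable_plane()) straight into its &struct drm_plane_funcs;
 * a minimal sketch (mydrv_plane_funcs is a hypothetical name)::
 *
 *	static const struct drm_plane_funcs mydrv_plane_funcs = {
 *		.update_plane		= drm_atomic_helper_update_plane,
 *		.disable_plane		= drm_atomic_helper_disable_plane,
 *		.destroy		= drm_plane_cleanup,
 *		.reset			= drm_atomic_helper_plane_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
 *	};
 *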
3396 * RETURNS:
3397 * Zero on success, error code on failure
3398 */
3399 int drm_atomic_helper_update_plane(struct drm_plane *plane,
3400 struct drm_crtc *crtc,
3401 struct drm_framebuffer *fb,
3402 int crtc_x, int crtc_y,
3403 unsigned int crtc_w, unsigned int crtc_h,
3404 uint32_t src_x, uint32_t src_y,
3405 uint32_t src_w, uint32_t src_h,
3406 struct drm_modeset_acquire_ctx *ctx)
3407 {
3408 struct drm_atomic_state *state;
3409 struct drm_plane_state *plane_state;
3410 int ret = 0;
3411
3412 state = drm_atomic_state_alloc(plane->dev);
3413 if (!state)
3414 return -ENOMEM;
3415
3416 state->acquire_ctx = ctx;
3417 plane_state = drm_atomic_get_plane_state(state, plane);
3418 if (IS_ERR(plane_state)) {
3419 ret = PTR_ERR(plane_state);
3420 goto fail;
3421 }
3422
3423 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3424 if (ret != 0)
3425 goto fail;
3426 drm_atomic_set_fb_for_plane(plane_state, fb);
3427 plane_state->crtc_x = crtc_x;
3428 plane_state->crtc_y = crtc_y;
3429 plane_state->crtc_w = crtc_w;
3430 plane_state->crtc_h = crtc_h;
3431 plane_state->src_x = src_x;
3432 plane_state->src_y = src_y;
3433 plane_state->src_w = src_w;
3434 plane_state->src_h = src_h;
3435
3436 if (plane == crtc->cursor)
3437 state->legacy_cursor_update = true;
3438
3439 ret = drm_atomic_commit(state);
3440 fail:
3441 drm_atomic_state_put(state);
3442 return ret;
3443 }
3444 EXPORT_SYMBOL(drm_atomic_helper_update_plane);
3445
3446 /**
3447 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
3448 * @plane: plane to disable
3449 * @ctx: lock acquire context
3450 *
3451 * Provides a default plane disable handler using the atomic driver interface.
3452 *
3453 * RETURNS:
3454 * Zero on success, error code on failure
3455 */
3456 int drm_atomic_helper_disable_plane(struct drm_plane *plane,
3457 struct drm_modeset_acquire_ctx *ctx)
3458 {
3459 struct drm_atomic_state *state;
3460 struct drm_plane_state *plane_state;
3461 int ret = 0;
3462
3463 state = drm_atomic_state_alloc(plane->dev);
3464 if (!state)
3465 return -ENOMEM;
3466
3467 state->acquire_ctx = ctx;
3468 plane_state = drm_atomic_get_plane_state(state, plane);
3469 if (IS_ERR(plane_state)) {
3470 ret = PTR_ERR(plane_state);
3471 goto fail;
3472 }
3473
3474 if (plane_state->crtc && plane_state->crtc->cursor == plane)
3475 plane_state->state->legacy_cursor_update = true;
3476
3477 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
3478 if (ret != 0)
3479 goto fail;
3480
3481 ret = drm_atomic_commit(state);
3482 fail:
3483 drm_atomic_state_put(state);
3484 return ret;
3485 }
3486 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
3487
3488 /**
3489 * drm_atomic_helper_set_config - set a new config from userspace
3490 * @set: mode set configuration
3491 * @ctx: lock acquisition context
3492 *
3493 * Provides a default CRTC set_config handler using the atomic driver interface.
3494 *
3495 * NOTE: For backwards compatibility with old userspace this automatically
3496 * resets the "link-status" property to GOOD, to force any link
3497 * re-training. The SETCRTC ioctl does not define whether an update
3498 * needs a full modeset or just a plane update, hence we're allowed to do
3499 * that. See also drm_connector_set_link_status_property().
3500 *
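 * Atomic drivers normally plug this directly into &struct drm_crtc_funcs,
 * together with drm_atomic_helper_page_flip(); a minimal sketch
 * (mydrv_crtc_funcs is a hypothetical name)::
 *
 *	static const struct drm_crtc_funcs mydrv_crtc_funcs = {
 *		.set_config		= drm_atomic_helper_set_config,
 *		.page_flip		= drm_atomic_helper_page_flip,
 *		.destroy		= drm_crtc_cleanup,
 *		.reset			= drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
 *	};
 *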
3501 * Returns:
3502 * Returns 0 on success, negative errno numbers on failure.
3503 */
3504 int drm_atomic_helper_set_config(struct drm_mode_set *set,
3505 struct drm_modeset_acquire_ctx *ctx)
3506 {
3507 struct drm_atomic_state *state;
3508 struct drm_crtc *crtc = set->crtc;
3509 int ret = 0;
3510
3511 state = drm_atomic_state_alloc(crtc->dev);
3512 if (!state)
3513 return -ENOMEM;
3514
3515 state->acquire_ctx = ctx;
3516 ret = __drm_atomic_helper_set_config(set, state);
3517 if (ret != 0)
3518 goto fail;
3519
3520 ret = handle_conflicting_encoders(state, true);
3521 if (ret)
3522 goto fail;
3523
3524 ret = drm_atomic_commit(state);
3525
3526 fail:
3527 drm_atomic_state_put(state);
3528 return ret;
3529 }
3530 EXPORT_SYMBOL(drm_atomic_helper_set_config);
3531
3532 /**
3533 * drm_atomic_helper_disable_all - disable all currently active outputs
3534 * @dev: DRM device
3535 * @ctx: lock acquisition context
3536 *
3537 * Loops through all connectors, finding those that aren't turned off and then
3538 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3539 * that they are connected to.
3540 *
3541 * This is used for example in suspend/resume to disable all currently active
3542 * functions when suspending. If you just want to shut down everything at e.g.
3543 * driver unload, look at drm_atomic_helper_shutdown().
3544 *
3545 * Note that if callers haven't already acquired all modeset locks this might
3546 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3547 *
3548 * Returns:
3549 * 0 on success or a negative error code on failure.
3550 *
3551 * See also:
3552 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3553 * drm_atomic_helper_shutdown().
3554 */
3555 int drm_atomic_helper_disable_all(struct drm_device *dev,
3556 struct drm_modeset_acquire_ctx *ctx)
3557 {
3558 struct drm_atomic_state *state;
3559 struct drm_connector_state *conn_state;
3560 struct drm_connector *conn;
3561 struct drm_plane_state *plane_state;
3562 struct drm_plane *plane;
3563 struct drm_crtc_state *crtc_state;
3564 struct drm_crtc *crtc;
3565 int ret, i;
3566
3567 state = drm_atomic_state_alloc(dev);
3568 if (!state)
3569 return -ENOMEM;
3570
3571 state->acquire_ctx = ctx;
3572
3573 drm_for_each_crtc(crtc, dev) {
3574 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3575 if (IS_ERR(crtc_state)) {
3576 ret = PTR_ERR(crtc_state);
3577 goto free;
3578 }
3579
3580 crtc_state->active = false;
3581
3582 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
3583 if (ret < 0)
3584 goto free;
3585
3586 ret = drm_atomic_add_affected_planes(state, crtc);
3587 if (ret < 0)
3588 goto free;
3589
3590 ret = drm_atomic_add_affected_connectors(state, crtc);
3591 if (ret < 0)
3592 goto free;
3593 }
3594
3595 for_each_new_connector_in_state(state, conn, conn_state, i) {
3596 ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
3597 if (ret < 0)
3598 goto free;
3599 }
3600
3601 for_each_new_plane_in_state(state, plane, plane_state, i) {
3602 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3603 if (ret < 0)
3604 goto free;
3605
3606 drm_atomic_set_fb_for_plane(plane_state, NULL);
3607 }
3608
3609 ret = drm_atomic_commit(state);
3610 free:
3611 drm_atomic_state_put(state);
3612 return ret;
3613 }
3614 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3615
3616 /**
3617 * drm_atomic_helper_reset_crtc - reset the active outputs of a CRTC
3618 * @crtc: DRM CRTC
3619 * @ctx: lock acquisition context
3620 *
3621 * Reset the active outputs by indicating that connectors have changed.
3622 * This implies a reset of all active components available between the CRTC and
3623 * connectors.
3624 *
3625 * A variant of this function exists with
3626 * drm_bridge_helper_reset_crtc(), dedicated to bridges.
3627 *
3628 * NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
3629 * For drivers which optimize out unnecessary modesets this will result in
3630 * a no-op commit, achieving nothing.
3631 *
3632 * Returns:
3633 * 0 on success or a negative error code on failure.
3634 */
3635 int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
3636 struct drm_modeset_acquire_ctx *ctx)
3637 {
3638 struct drm_atomic_state *state;
3639 struct drm_crtc_state *crtc_state;
3640 int ret;
3641
3642 state = drm_atomic_state_alloc(crtc->dev);
3643 if (!state)
3644 return -ENOMEM;
3645
3646 state->acquire_ctx = ctx;
3647
3648 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3649 if (IS_ERR(crtc_state)) {
3650 ret = PTR_ERR(crtc_state);
3651 goto out;
3652 }
3653
3654 crtc_state->connectors_changed = true;
3655
3656 ret = drm_atomic_commit(state);
3657 out:
3658 drm_atomic_state_put(state);
3659
3660 return ret;
3661 }
3662 EXPORT_SYMBOL(drm_atomic_helper_reset_crtc);
3663
3664 /**
3665 * drm_atomic_helper_shutdown - shut down all CRTCs
3666 * @dev: DRM device
3667 *
3668 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
3669 * suspend should instead be handled with drm_atomic_helper_suspend(), since
3670 * that also takes a snapshot of the modeset state to be restored on resume.
3671 *
3672 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3673 * and it is the atomic version of drm_helper_force_disable_all().
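 *
 * Drivers typically call this from their remove and shutdown callbacks, after
 * unregistering the device, for instance (platform-driver sketch with
 * hypothetical mydrv_* names)::
 *
 *	static void mydrv_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *	}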
3674 */
3675 void drm_atomic_helper_shutdown(struct drm_device *dev)
3676 {
3677 struct drm_modeset_acquire_ctx ctx;
3678 int ret;
3679
3680 if (dev == NULL)
3681 return;
3682
3683 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
3684
3685 ret = drm_atomic_helper_disable_all(dev, &ctx);
3686 if (ret)
3687 drm_err(dev,
3688 "Disabling all crtc's during unload failed with %i\n",
3689 ret);
3690
3691 DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
3692 }
3693 EXPORT_SYMBOL(drm_atomic_helper_shutdown);
3694
3695 /**
3696 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
3697 * @dev: DRM device
3698 * @ctx: lock acquisition context
3699 *
3700 * Makes a copy of the current atomic state by looping over all objects and
3701 * duplicating their respective states. This is used for example by suspend/
3702 * resume support code to save the state prior to suspend such that it can
3703 * be restored upon resume.
3704 *
3705 * Note that this treats atomic state as persistent between save and restore.
3706 * Drivers must make sure that this is possible and won't result in confusion
3707 * or erroneous behaviour.
3708 *
3709 * Note that if callers haven't already acquired all modeset locks this might
3710 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3711 *
3712 * Returns:
3713 * A pointer to the copy of the atomic state object on success or an
3714 * ERR_PTR()-encoded error code on failure.
3715 *
3716 * See also:
3717 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
3718 */
3719 struct drm_atomic_state *
3720 drm_atomic_helper_duplicate_state(struct drm_device *dev,
3721 struct drm_modeset_acquire_ctx *ctx)
3722 {
3723 struct drm_atomic_state *state;
3724 struct drm_connector *conn;
3725 struct drm_connector_list_iter conn_iter;
3726 struct drm_plane *plane;
3727 struct drm_crtc *crtc;
3728 int err = 0;
3729
3730 state = drm_atomic_state_alloc(dev);
3731 if (!state)
3732 return ERR_PTR(-ENOMEM);
3733
3734 state->acquire_ctx = ctx;
3735 state->duplicated = true;
3736
3737 drm_for_each_crtc(crtc, dev) {
3738 struct drm_crtc_state *crtc_state;
3739
3740 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3741 if (IS_ERR(crtc_state)) {
3742 err = PTR_ERR(crtc_state);
3743 goto free;
3744 }
3745 }
3746
3747 drm_for_each_plane(plane, dev) {
3748 struct drm_plane_state *plane_state;
3749
3750 plane_state = drm_atomic_get_plane_state(state, plane);
3751 if (IS_ERR(plane_state)) {
3752 err = PTR_ERR(plane_state);
3753 goto free;
3754 }
3755 }
3756
3757 drm_connector_list_iter_begin(dev, &conn_iter);
3758 drm_for_each_connector_iter(conn, &conn_iter) {
3759 struct drm_connector_state *conn_state;
3760
3761 conn_state = drm_atomic_get_connector_state(state, conn);
3762 if (IS_ERR(conn_state)) {
3763 err = PTR_ERR(conn_state);
3764 drm_connector_list_iter_end(&conn_iter);
3765 goto free;
3766 }
3767 }
3768 drm_connector_list_iter_end(&conn_iter);
3769
3770 /* clear the acquire context so that it isn't accidentally reused */
3771 state->acquire_ctx = NULL;
3772
3773 free:
3774 if (err < 0) {
3775 drm_atomic_state_put(state);
3776 state = ERR_PTR(err);
3777 }
3778
3779 return state;
3780 }
3781 EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
3782
3783 /**
3784 * drm_atomic_helper_suspend - subsystem-level suspend helper
3785 * @dev: DRM device
3786 *
3787 * Duplicates the current atomic state, disables all active outputs and then
3788 * returns a pointer to the original atomic state to the caller. Drivers can
3789 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
3790 * restore the output configuration that was active at the time the system
3791 * entered suspend.
3792 *
3793 * Note that it is potentially unsafe to use this. The atomic state object
3794 * returned by this function is assumed to be persistent. Drivers must ensure
3795 * that this holds true. Before calling this function, drivers must make sure
3796 * to suspend fbdev emulation so that nothing can be using the device.
3797 *
3798 * Returns:
3799 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
3800 * encoded error code on failure. Drivers should store the returned atomic
3801 * state object and pass it to the drm_atomic_helper_resume() helper upon
3802 * resume.
3803 *
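 * A rough sketch of system-sleep handling built on this helper together with
 * drm_atomic_helper_resume() (the mydrv_* names and the priv->suspend_state
 * member are hypothetical)::
 *
 *	static int mydrv_pm_suspend(struct device *dev)
 *	{
 *		struct mydrv_device *priv = dev_get_drvdata(dev);
 *
 *		priv->suspend_state = drm_atomic_helper_suspend(&priv->drm);
 *		if (IS_ERR(priv->suspend_state))
 *			return PTR_ERR(priv->suspend_state);
 *
 *		return 0;
 *	}
 *
 *	static int mydrv_pm_resume(struct device *dev)
 *	{
 *		struct mydrv_device *priv = dev_get_drvdata(dev);
 *
 *		return drm_atomic_helper_resume(&priv->drm, priv->suspend_state);
 *	}
 *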
3804 * See also:
3805 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
3806 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
3807 */
3808 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
3809 {
3810 struct drm_modeset_acquire_ctx ctx;
3811 struct drm_atomic_state *state;
3812 int err;
3813
3814 /* This can never be returned, but it makes the compiler happy */
3815 state = ERR_PTR(-EINVAL);
3816
3817 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3818
3819 state = drm_atomic_helper_duplicate_state(dev, &ctx);
3820 if (IS_ERR(state))
3821 goto unlock;
3822
3823 err = drm_atomic_helper_disable_all(dev, &ctx);
3824 if (err < 0) {
3825 drm_atomic_state_put(state);
3826 state = ERR_PTR(err);
3827 goto unlock;
3828 }
3829
3830 unlock:
3831 DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3832 if (err)
3833 return ERR_PTR(err);
3834
3835 return state;
3836 }
3837 EXPORT_SYMBOL(drm_atomic_helper_suspend);
3838
3839 /**
3840 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
3841 * @state: duplicated atomic state to commit
3842 * @ctx: pointer to acquire_ctx to use for commit.
3843 *
3844 * The state returned by drm_atomic_helper_duplicate_state() and
3845 * drm_atomic_helper_suspend() is partially invalid, and needs to
3846 * be fixed up before commit.
3847 *
3848 * Returns:
3849 * 0 on success or a negative error code on failure.
3850 *
3851 * See also:
3852 * drm_atomic_helper_suspend()
3853 */
3854 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3855 struct drm_modeset_acquire_ctx *ctx)
3856 {
3857 int i, ret;
3858 struct drm_plane *plane;
3859 struct drm_plane_state *new_plane_state;
3860 struct drm_connector *connector;
3861 struct drm_connector_state *new_conn_state;
3862 struct drm_crtc *crtc;
3863 struct drm_crtc_state *new_crtc_state;
3864
3865 state->acquire_ctx = ctx;
3866
3867 for_each_new_plane_in_state(state, plane, new_plane_state, i)
3868 state->planes[i].old_state = plane->state;
3869
3870 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
3871 state->crtcs[i].old_state = crtc->state;
3872
3873 for_each_new_connector_in_state(state, connector, new_conn_state, i)
3874 state->connectors[i].old_state = connector->state;
3875
3876 ret = drm_atomic_commit(state);
3877
3878 state->acquire_ctx = NULL;
3879
3880 return ret;
3881 }
3882 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
3883
3884 /**
3885 * drm_atomic_helper_resume - subsystem-level resume helper
3886 * @dev: DRM device
3887 * @state: atomic state to resume to
3888 *
3889 * Calls drm_mode_config_reset() to synchronize hardware and software states,
3890 * grabs all modeset locks and commits the atomic state object. This can be
3891 * used in conjunction with the drm_atomic_helper_suspend() helper to
3892 * implement suspend/resume for drivers that support atomic mode-setting.
3893 *
3894 * Returns:
3895 * 0 on success or a negative error code on failure.
3896 *
3897 * See also:
3898 * drm_atomic_helper_suspend()
3899 */
3900 int drm_atomic_helper_resume(struct drm_device *dev,
3901 struct drm_atomic_state *state)
3902 {
3903 struct drm_modeset_acquire_ctx ctx;
3904 int err;
3905
3906 drm_mode_config_reset(dev);
3907
3908 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3909
3910 err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
3911
3912 DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3913 drm_atomic_state_put(state);
3914
3915 return err;
3916 }
3917 EXPORT_SYMBOL(drm_atomic_helper_resume);
3918
3919 static int page_flip_common(struct drm_atomic_state *state,
3920 struct drm_crtc *crtc,
3921 struct drm_framebuffer *fb,
3922 struct drm_pending_vblank_event *event,
3923 uint32_t flags)
3924 {
3925 struct drm_plane *plane = crtc->primary;
3926 struct drm_plane_state *plane_state;
3927 struct drm_crtc_state *crtc_state;
3928 int ret = 0;
3929
3930 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3931 if (IS_ERR(crtc_state))
3932 return PTR_ERR(crtc_state);
3933
3934 crtc_state->event = event;
3935 crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;
3936
3937 plane_state = drm_atomic_get_plane_state(state, plane);
3938 if (IS_ERR(plane_state))
3939 return PTR_ERR(plane_state);
3940
3941 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3942 if (ret != 0)
3943 return ret;
3944 drm_atomic_set_fb_for_plane(plane_state, fb);
3945
3946 /* Make sure we don't accidentally do a full modeset. */
3947 state->allow_modeset = false;
3948 if (!crtc_state->active) {
3949 drm_dbg_atomic(crtc->dev,
3950 "[CRTC:%d:%s] disabled, rejecting legacy flip\n",
3951 crtc->base.id, crtc->name);
3952 return -EINVAL;
3953 }
3954
3955 return ret;
3956 }
3957
3958 /**
3959 * drm_atomic_helper_page_flip - execute a legacy page flip
3960 * @crtc: DRM CRTC
3961 * @fb: DRM framebuffer
3962 * @event: optional DRM event to signal upon completion
3963 * @flags: flip flags for non-vblank sync'ed updates
3964 * @ctx: lock acquisition context
3965 *
3966 * Provides a default &drm_crtc_funcs.page_flip implementation
3967 * using the atomic driver interface.
3968 *
3969 * Returns:
3970 * Returns 0 on success, negative errno numbers on failure.
3971 *
3972 * See also:
3973 * drm_atomic_helper_page_flip_target()
3974 */
3975 int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
3976 struct drm_framebuffer *fb,
3977 struct drm_pending_vblank_event *event,
3978 uint32_t flags,
3979 struct drm_modeset_acquire_ctx *ctx)
3980 {
3981 struct drm_plane *plane = crtc->primary;
3982 struct drm_atomic_state *state;
3983 int ret = 0;
3984
3985 state = drm_atomic_state_alloc(plane->dev);
3986 if (!state)
3987 return -ENOMEM;
3988
3989 state->acquire_ctx = ctx;
3990
3991 ret = page_flip_common(state, crtc, fb, event, flags);
3992 if (ret != 0)
3993 goto fail;
3994
3995 ret = drm_atomic_nonblocking_commit(state);
3996 fail:
3997 drm_atomic_state_put(state);
3998 return ret;
3999 }
4000 EXPORT_SYMBOL(drm_atomic_helper_page_flip);
4001
4002 /**
4003 * drm_atomic_helper_page_flip_target - do a page flip on a target vblank period.
4004 * @crtc: DRM CRTC
4005 * @fb: DRM framebuffer
4006 * @event: optional DRM event to signal upon completion
4007 * @flags: flip flags for non-vblank sync'ed updates
4008 * @target: the target vblank period when the flip should take effect
4009 * @ctx: lock acquisition context
4010 *
4011 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
4012 * Similar to drm_atomic_helper_page_flip() with extra parameter to specify
4013 * target vblank period to flip.
4014 *
4015 * Returns:
4016 * Returns 0 on success, negative errno numbers on failure.
4017 */
4018 int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
4019 struct drm_framebuffer *fb,
4020 struct drm_pending_vblank_event *event,
4021 uint32_t flags,
4022 uint32_t target,
4023 struct drm_modeset_acquire_ctx *ctx)
4024 {
4025 struct drm_plane *plane = crtc->primary;
4026 struct drm_atomic_state *state;
4027 struct drm_crtc_state *crtc_state;
4028 int ret = 0;
4029
4030 state = drm_atomic_state_alloc(plane->dev);
4031 if (!state)
4032 return -ENOMEM;
4033
4034 state->acquire_ctx = ctx;
4035
4036 ret = page_flip_common(state, crtc, fb, event, flags);
4037 if (ret != 0)
4038 goto fail;
4039
4040 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4041 if (WARN_ON(!crtc_state)) {
4042 ret = -EINVAL;
4043 goto fail;
4044 }
4045 crtc_state->target_vblank = target;
4046
4047 ret = drm_atomic_nonblocking_commit(state);
4048 fail:
4049 drm_atomic_state_put(state);
4050 return ret;
4051 }
4052 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
4053
4054 /**
4055 * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
4056 * the input end of a bridge
4057 * @bridge: bridge control structure
4058 * @bridge_state: new bridge state
4059 * @crtc_state: new CRTC state
4060 * @conn_state: new connector state
4061 * @output_fmt: tested output bus format
4062 * @num_input_fmts: will contain the size of the returned array
4063 *
4064 * This helper is a pluggable implementation of the
4065 * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
4066 * modify the bus configuration between their input and their output. It
4067 * returns an array of input formats with a single element set to @output_fmt.
4068 *
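 * Bridges that leave the bus format untouched can simply point their
 * &drm_bridge_funcs.atomic_get_input_bus_fmts at this helper; a minimal
 * sketch (mydrv_bridge_funcs is a hypothetical name)::
 *
 *	static const struct drm_bridge_funcs mydrv_bridge_funcs = {
 *		.atomic_reset		= drm_atomic_helper_bridge_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
 *		.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 *	};
 *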
4069 * RETURNS:
4070 * a valid format array of size @num_input_fmts, or NULL if the allocation
4071 * failed
4072 */
4073 u32 *
4074 drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
4075 struct drm_bridge_state *bridge_state,
4076 struct drm_crtc_state *crtc_state,
4077 struct drm_connector_state *conn_state,
4078 u32 output_fmt,
4079 unsigned int *num_input_fmts)
4080 {
4081 u32 *input_fmts;
4082
4083 input_fmts = kzalloc_obj(*input_fmts, GFP_KERNEL);
4084 if (!input_fmts) {
4085 *num_input_fmts = 0;
4086 return NULL;
4087 }
4088
4089 *num_input_fmts = 1;
4090 input_fmts[0] = output_fmt;
4091 return input_fmts;
4092 }
4093 EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
4094