xref: /linux/drivers/gpu/drm/drm_atomic_helper.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
1 /*
2  * Copyright (C) 2014 Red Hat
3  * Copyright (C) 2014 Intel Corp.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  * Rob Clark <robdclark@gmail.com>
25  * Daniel Vetter <daniel.vetter@ffwll.ch>
26  */
27 
28 #include <linux/dma-fence.h>
29 #include <linux/ktime.h>
30 
31 #include <drm/drm_atomic.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_atomic_uapi.h>
34 #include <drm/drm_bridge.h>
35 #include <drm/drm_damage_helper.h>
36 #include <drm/drm_device.h>
37 #include <drm/drm_drv.h>
38 #include <drm/drm_gem_atomic_helper.h>
39 #include <drm/drm_plane_helper.h>
40 #include <drm/drm_print.h>
41 #include <drm/drm_self_refresh_helper.h>
42 #include <drm/drm_vblank.h>
43 #include <drm/drm_writeback.h>
44 
45 #include "drm_crtc_helper_internal.h"
46 #include "drm_crtc_internal.h"
47 
48 /**
49  * DOC: overview
50  *
51  * This helper library provides implementations of check and commit functions on
52  * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
53  * also provides convenience implementations for the atomic state handling
54  * callbacks for drivers which don't need to subclass the drm core structures to
55  * add their own additional internal state.
56  *
57  * This library also provides default implementations for the check callback in
58  * drm_atomic_helper_check() and for the commit callback with
59  * drm_atomic_helper_commit(). But the individual stages and callbacks are
60  * exposed to allow drivers to mix and match and e.g. use the plane helpers only
61  * together with a driver private modeset implementation.
62  *
63  * This library also provides implementations for all the legacy driver
64  * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
65  * drm_atomic_helper_disable_plane(), and the various functions to implement
66  * set_property callbacks. New drivers must not implement these functions
67  * themselves but must use the provided helpers.
68  *
69  * The atomic helper uses the same function table structures as all other
70  * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
71  * struct &drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
72  * also shares the &struct drm_plane_helper_funcs function table with the plane
73  * helpers.
74  */
75 static void
76 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
77 				struct drm_plane_state *old_plane_state,
78 				struct drm_plane_state *plane_state,
79 				struct drm_plane *plane)
80 {
81 	struct drm_crtc_state *crtc_state;
82 
83 	if (old_plane_state->crtc) {
84 		crtc_state = drm_atomic_get_new_crtc_state(state,
85 							   old_plane_state->crtc);
86 
87 		if (WARN_ON(!crtc_state))
88 			return;
89 
90 		crtc_state->planes_changed = true;
91 	}
92 
93 	if (plane_state->crtc) {
94 		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
95 
96 		if (WARN_ON(!crtc_state))
97 			return;
98 
99 		crtc_state->planes_changed = true;
100 	}
101 }
102 
103 static int handle_conflicting_encoders(struct drm_atomic_state *state,
104 				       bool disable_conflicting_encoders)
105 {
106 	struct drm_connector_state *new_conn_state;
107 	struct drm_connector *connector;
108 	struct drm_connector_list_iter conn_iter;
109 	struct drm_encoder *encoder;
110 	unsigned int encoder_mask = 0;
111 	int i, ret = 0;
112 
113 	/*
114 	 * First loop, find all newly assigned encoders from the connectors
115 	 * part of the state. If the same encoder is assigned to multiple
116 	 * connectors bail out.
117 	 */
118 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
119 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
120 		struct drm_encoder *new_encoder;
121 
122 		if (!new_conn_state->crtc)
123 			continue;
124 
125 		if (funcs->atomic_best_encoder)
126 			new_encoder = funcs->atomic_best_encoder(connector,
127 								 state);
128 		else if (funcs->best_encoder)
129 			new_encoder = funcs->best_encoder(connector);
130 		else
131 			new_encoder = drm_connector_get_single_encoder(connector);
132 
133 		if (new_encoder) {
134 			if (encoder_mask & drm_encoder_mask(new_encoder)) {
135 				drm_dbg_atomic(connector->dev,
136 					       "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
137 					       new_encoder->base.id, new_encoder->name,
138 					       connector->base.id, connector->name);
139 
140 				return -EINVAL;
141 			}
142 
143 			encoder_mask |= drm_encoder_mask(new_encoder);
144 		}
145 	}
146 
147 	if (!encoder_mask)
148 		return 0;
149 
150 	/*
151 	 * Second loop, iterate over all connectors not part of the state.
152 	 *
153 	 * If a conflicting encoder is found and disable_conflicting_encoders
154 	 * is not set, an error is returned. Userspace can provide a solution
155 	 * through the atomic ioctl.
156 	 *
157 	 * If the flag is set conflicting connectors are removed from the CRTC
158 	 * and the CRTC is disabled if no encoder is left. This preserves
159 	 * compatibility with the legacy set_config behavior.
160 	 */
161 	drm_connector_list_iter_begin(state->dev, &conn_iter);
162 	drm_for_each_connector_iter(connector, &conn_iter) {
163 		struct drm_crtc_state *crtc_state;
164 
165 		if (drm_atomic_get_new_connector_state(state, connector))
166 			continue;
167 
168 		encoder = connector->state->best_encoder;
169 		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
170 			continue;
171 
172 		if (!disable_conflicting_encoders) {
173 			drm_dbg_atomic(connector->dev,
174 				       "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
175 				       encoder->base.id, encoder->name,
176 				       connector->state->crtc->base.id,
177 				       connector->state->crtc->name,
178 				       connector->base.id, connector->name);
179 			ret = -EINVAL;
180 			goto out;
181 		}
182 
183 		new_conn_state = drm_atomic_get_connector_state(state, connector);
184 		if (IS_ERR(new_conn_state)) {
185 			ret = PTR_ERR(new_conn_state);
186 			goto out;
187 		}
188 
189 		drm_dbg_atomic(connector->dev,
190 			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
191 			       encoder->base.id, encoder->name,
192 			       new_conn_state->crtc->base.id, new_conn_state->crtc->name,
193 			       connector->base.id, connector->name);
194 
195 		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
196 
197 		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
198 		if (ret)
199 			goto out;
200 
201 		if (!crtc_state->connector_mask) {
202 			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
203 								NULL);
204 			if (ret < 0)
205 				goto out;
206 
207 			crtc_state->active = false;
208 		}
209 	}
210 out:
211 	drm_connector_list_iter_end(&conn_iter);
212 
213 	return ret;
214 }
215 
216 static void
217 set_best_encoder(struct drm_atomic_state *state,
218 		 struct drm_connector_state *conn_state,
219 		 struct drm_encoder *encoder)
220 {
221 	struct drm_crtc_state *crtc_state;
222 	struct drm_crtc *crtc;
223 
224 	if (conn_state->best_encoder) {
225 		/* Unset the encoder_mask in the old crtc state. */
226 		crtc = conn_state->connector->state->crtc;
227 
228 		/* A NULL crtc is an error here because we should have
229 		 * duplicated a NULL best_encoder when crtc was NULL.
230 		 * As an exception restoring duplicated atomic state
231 		 * during resume is allowed, so don't warn when
232 		 * best_encoder is equal to encoder we intend to set.
233 		 */
234 		WARN_ON(!crtc && encoder != conn_state->best_encoder);
235 		if (crtc) {
236 			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
237 
238 			crtc_state->encoder_mask &=
239 				~drm_encoder_mask(conn_state->best_encoder);
240 		}
241 	}
242 
243 	if (encoder) {
244 		crtc = conn_state->crtc;
245 		WARN_ON(!crtc);
246 		if (crtc) {
247 			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
248 
249 			crtc_state->encoder_mask |=
250 				drm_encoder_mask(encoder);
251 		}
252 	}
253 
254 	conn_state->best_encoder = encoder;
255 }
256 
257 static void
258 steal_encoder(struct drm_atomic_state *state,
259 	      struct drm_encoder *encoder)
260 {
261 	struct drm_crtc_state *crtc_state;
262 	struct drm_connector *connector;
263 	struct drm_connector_state *old_connector_state, *new_connector_state;
264 	int i;
265 
266 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
267 		struct drm_crtc *encoder_crtc;
268 
269 		if (new_connector_state->best_encoder != encoder)
270 			continue;
271 
272 		encoder_crtc = old_connector_state->crtc;
273 
274 		drm_dbg_atomic(encoder->dev,
275 			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
276 			       encoder->base.id, encoder->name,
277 			       encoder_crtc->base.id, encoder_crtc->name);
278 
279 		set_best_encoder(state, new_connector_state, NULL);
280 
281 		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
282 		crtc_state->connectors_changed = true;
283 
284 		return;
285 	}
286 }
287 
288 static int
289 update_connector_routing(struct drm_atomic_state *state,
290 			 struct drm_connector *connector,
291 			 struct drm_connector_state *old_connector_state,
292 			 struct drm_connector_state *new_connector_state)
293 {
294 	const struct drm_connector_helper_funcs *funcs;
295 	struct drm_encoder *new_encoder;
296 	struct drm_crtc_state *crtc_state;
297 
298 	drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
299 		       connector->base.id, connector->name);
300 
301 	if (old_connector_state->crtc != new_connector_state->crtc) {
302 		if (old_connector_state->crtc) {
303 			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
304 			crtc_state->connectors_changed = true;
305 		}
306 
307 		if (new_connector_state->crtc) {
308 			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
309 			crtc_state->connectors_changed = true;
310 		}
311 	}
312 
313 	if (!new_connector_state->crtc) {
314 		drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
315 				connector->base.id, connector->name);
316 
317 		set_best_encoder(state, new_connector_state, NULL);
318 
319 		return 0;
320 	}
321 
322 	crtc_state = drm_atomic_get_new_crtc_state(state,
323 						   new_connector_state->crtc);
324 	/*
325 	 * For compatibility with legacy users, we want to make sure that
326 	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
327 	 * which would result in anything else must be considered invalid, to
328 	 * avoid turning on new displays on dead connectors.
329 	 *
330 	 * Since the connector can be unregistered at any point during an
331 	 * atomic check or commit, this is racy. But that's OK: all we care
332 	 * about is ensuring that userspace can't do anything but shut off the
333 	 * display on a connector that was destroyed after it's been notified,
334 	 * not before.
335 	 *
336 	 * Additionally, we also want to ignore connector registration when
337 	 * we're trying to restore an atomic state during system resume since
338 	 * there's a chance the connector may have been destroyed during the
339 	 * process, but it's better to ignore that then cause
340 	 * drm_atomic_helper_resume() to fail.
341 	 */
342 	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
343 	    crtc_state->active) {
344 		drm_dbg_atomic(connector->dev,
345 			       "[CONNECTOR:%d:%s] is not registered\n",
346 			       connector->base.id, connector->name);
347 		return -EINVAL;
348 	}
349 
350 	funcs = connector->helper_private;
351 
352 	if (funcs->atomic_best_encoder)
353 		new_encoder = funcs->atomic_best_encoder(connector, state);
354 	else if (funcs->best_encoder)
355 		new_encoder = funcs->best_encoder(connector);
356 	else
357 		new_encoder = drm_connector_get_single_encoder(connector);
358 
359 	if (!new_encoder) {
360 		drm_dbg_atomic(connector->dev,
361 			       "No suitable encoder found for [CONNECTOR:%d:%s]\n",
362 			       connector->base.id, connector->name);
363 		return -EINVAL;
364 	}
365 
366 	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
367 		drm_dbg_atomic(connector->dev,
368 			       "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
369 			       new_encoder->base.id,
370 			       new_encoder->name,
371 			       new_connector_state->crtc->base.id,
372 			       new_connector_state->crtc->name);
373 		return -EINVAL;
374 	}
375 
376 	if (new_encoder == new_connector_state->best_encoder) {
377 		set_best_encoder(state, new_connector_state, new_encoder);
378 
379 		drm_dbg_atomic(connector->dev,
380 			       "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
381 			       connector->base.id,
382 			       connector->name,
383 			       new_encoder->base.id,
384 			       new_encoder->name,
385 			       new_connector_state->crtc->base.id,
386 			       new_connector_state->crtc->name);
387 
388 		return 0;
389 	}
390 
391 	steal_encoder(state, new_encoder);
392 
393 	set_best_encoder(state, new_connector_state, new_encoder);
394 
395 	crtc_state->connectors_changed = true;
396 
397 	drm_dbg_atomic(connector->dev,
398 		       "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
399 		       connector->base.id,
400 		       connector->name,
401 		       new_encoder->base.id,
402 		       new_encoder->name,
403 		       new_connector_state->crtc->base.id,
404 		       new_connector_state->crtc->name);
405 
406 	return 0;
407 }
408 
409 static int
410 mode_fixup(struct drm_atomic_state *state)
411 {
412 	struct drm_crtc *crtc;
413 	struct drm_crtc_state *new_crtc_state;
414 	struct drm_connector *connector;
415 	struct drm_connector_state *new_conn_state;
416 	int i;
417 	int ret;
418 
419 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
420 		if (!new_crtc_state->mode_changed &&
421 		    !new_crtc_state->connectors_changed)
422 			continue;
423 
424 		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
425 	}
426 
427 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
428 		const struct drm_encoder_helper_funcs *funcs;
429 		struct drm_encoder *encoder;
430 		struct drm_bridge *bridge;
431 
432 		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
433 
434 		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
435 			continue;
436 
437 		new_crtc_state =
438 			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
439 
440 		/*
441 		 * Each encoder has at most one connector (since we always steal
442 		 * it away), so we won't call ->mode_fixup twice.
443 		 */
444 		encoder = new_conn_state->best_encoder;
445 		funcs = encoder->helper_private;
446 
447 		bridge = drm_bridge_chain_get_first_bridge(encoder);
448 		ret = drm_atomic_bridge_chain_check(bridge,
449 						    new_crtc_state,
450 						    new_conn_state);
451 		if (ret) {
452 			drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
453 			return ret;
454 		}
455 
456 		if (funcs && funcs->atomic_check) {
457 			ret = funcs->atomic_check(encoder, new_crtc_state,
458 						  new_conn_state);
459 			if (ret) {
460 				drm_dbg_atomic(encoder->dev,
461 					       "[ENCODER:%d:%s] check failed\n",
462 					       encoder->base.id, encoder->name);
463 				return ret;
464 			}
465 		} else if (funcs && funcs->mode_fixup) {
466 			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
467 						&new_crtc_state->adjusted_mode);
468 			if (!ret) {
469 				drm_dbg_atomic(encoder->dev,
470 					       "[ENCODER:%d:%s] fixup failed\n",
471 					       encoder->base.id, encoder->name);
472 				return -EINVAL;
473 			}
474 		}
475 	}
476 
477 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
478 		const struct drm_crtc_helper_funcs *funcs;
479 
480 		if (!new_crtc_state->enable)
481 			continue;
482 
483 		if (!new_crtc_state->mode_changed &&
484 		    !new_crtc_state->connectors_changed)
485 			continue;
486 
487 		funcs = crtc->helper_private;
488 		if (!funcs || !funcs->mode_fixup)
489 			continue;
490 
491 		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
492 					&new_crtc_state->adjusted_mode);
493 		if (!ret) {
494 			drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n",
495 				       crtc->base.id, crtc->name);
496 			return -EINVAL;
497 		}
498 	}
499 
500 	return 0;
501 }
502 
503 static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
504 					    struct drm_encoder *encoder,
505 					    struct drm_crtc *crtc,
506 					    const struct drm_display_mode *mode)
507 {
508 	struct drm_bridge *bridge;
509 	enum drm_mode_status ret;
510 
511 	ret = drm_encoder_mode_valid(encoder, mode);
512 	if (ret != MODE_OK) {
513 		drm_dbg_atomic(encoder->dev,
514 			       "[ENCODER:%d:%s] mode_valid() failed\n",
515 			       encoder->base.id, encoder->name);
516 		return ret;
517 	}
518 
519 	bridge = drm_bridge_chain_get_first_bridge(encoder);
520 	ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
521 					  mode);
522 	if (ret != MODE_OK) {
523 		drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
524 		return ret;
525 	}
526 
527 	ret = drm_crtc_mode_valid(crtc, mode);
528 	if (ret != MODE_OK) {
529 		drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n",
530 			       crtc->base.id, crtc->name);
531 		return ret;
532 	}
533 
534 	return ret;
535 }
536 
537 static int
538 mode_valid(struct drm_atomic_state *state)
539 {
540 	struct drm_connector_state *conn_state;
541 	struct drm_connector *connector;
542 	int i;
543 
544 	for_each_new_connector_in_state(state, connector, conn_state, i) {
545 		struct drm_encoder *encoder = conn_state->best_encoder;
546 		struct drm_crtc *crtc = conn_state->crtc;
547 		struct drm_crtc_state *crtc_state;
548 		enum drm_mode_status mode_status;
549 		const struct drm_display_mode *mode;
550 
551 		if (!crtc || !encoder)
552 			continue;
553 
554 		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
555 		if (!crtc_state)
556 			continue;
557 		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
558 			continue;
559 
560 		mode = &crtc_state->mode;
561 
562 		mode_status = mode_valid_path(connector, encoder, crtc, mode);
563 		if (mode_status != MODE_OK)
564 			return -EINVAL;
565 	}
566 
567 	return 0;
568 }
569 
570 /**
571  * drm_atomic_helper_check_modeset - validate state object for modeset changes
572  * @dev: DRM device
573  * @state: the driver state object
574  *
575  * Check the state object to see if the requested state is physically possible.
576  * This does all the CRTC and connector related computations for an atomic
577  * update and adds any additional connectors needed for full modesets. It calls
578  * the various per-object callbacks in the follow order:
579  *
580  * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
581  * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
582  * 3. If it's determined a modeset is needed then all connectors on the affected
583  *    CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
584  * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
585  *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
586  * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
587  * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
588  *    This function is only called when the encoder will be part of a configured CRTC,
589  *    it must not be used for implementing connector property validation.
590  *    If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
591  *    instead.
592  * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
593  *
594  * &drm_crtc_state.mode_changed is set when the input mode is changed.
595  * &drm_crtc_state.connectors_changed is set when a connector is added or
596  * removed from the CRTC.  &drm_crtc_state.active_changed is set when
597  * &drm_crtc_state.active changes, which is used for DPMS.
598  * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
599  * See also: drm_atomic_crtc_needs_modeset()
600  *
601  * IMPORTANT:
602  *
603  * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
604  * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
605  * without a full modeset) _must_ call this function after that change. It is
606  * permitted to call this function multiple times for the same update, e.g.
607  * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
608  * adjusted dotclock for fifo space allocation and watermark computation.
609  *
610  * RETURNS:
611  * Zero for success or -errno
612  */
613 int
614 drm_atomic_helper_check_modeset(struct drm_device *dev,
615 				struct drm_atomic_state *state)
616 {
617 	struct drm_crtc *crtc;
618 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
619 	struct drm_connector *connector;
620 	struct drm_connector_state *old_connector_state, *new_connector_state;
621 	int i, ret;
622 	unsigned int connectors_mask = 0;
623 
624 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
625 		bool has_connectors =
626 			!!new_crtc_state->connector_mask;
627 
628 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
629 
630 		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
631 			drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n",
632 				       crtc->base.id, crtc->name);
633 			new_crtc_state->mode_changed = true;
634 		}
635 
636 		if (old_crtc_state->enable != new_crtc_state->enable) {
637 			drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n",
638 				       crtc->base.id, crtc->name);
639 
640 			/*
641 			 * For clarity this assignment is done here, but
642 			 * enable == 0 is only true when there are no
643 			 * connectors and a NULL mode.
644 			 *
645 			 * The other way around is true as well. enable != 0
646 			 * implies that connectors are attached and a mode is set.
647 			 */
648 			new_crtc_state->mode_changed = true;
649 			new_crtc_state->connectors_changed = true;
650 		}
651 
652 		if (old_crtc_state->active != new_crtc_state->active) {
653 			drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n",
654 				       crtc->base.id, crtc->name);
655 			new_crtc_state->active_changed = true;
656 		}
657 
658 		if (new_crtc_state->enable != has_connectors) {
659 			drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch\n",
660 				       crtc->base.id, crtc->name);
661 
662 			return -EINVAL;
663 		}
664 
665 		if (drm_dev_has_vblank(dev))
666 			new_crtc_state->no_vblank = false;
667 		else
668 			new_crtc_state->no_vblank = true;
669 	}
670 
671 	ret = handle_conflicting_encoders(state, false);
672 	if (ret)
673 		return ret;
674 
675 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
676 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
677 
678 		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
679 
680 		/*
681 		 * This only sets crtc->connectors_changed for routing changes,
682 		 * drivers must set crtc->connectors_changed themselves when
683 		 * connector properties need to be updated.
684 		 */
685 		ret = update_connector_routing(state, connector,
686 					       old_connector_state,
687 					       new_connector_state);
688 		if (ret)
689 			return ret;
690 		if (old_connector_state->crtc) {
691 			new_crtc_state = drm_atomic_get_new_crtc_state(state,
692 								       old_connector_state->crtc);
693 			if (old_connector_state->link_status !=
694 			    new_connector_state->link_status)
695 				new_crtc_state->connectors_changed = true;
696 
697 			if (old_connector_state->max_requested_bpc !=
698 			    new_connector_state->max_requested_bpc)
699 				new_crtc_state->connectors_changed = true;
700 		}
701 
702 		if (funcs->atomic_check)
703 			ret = funcs->atomic_check(connector, state);
704 		if (ret)
705 			return ret;
706 
707 		connectors_mask |= BIT(i);
708 	}
709 
710 	/*
711 	 * After all the routing has been prepared we need to add in any
712 	 * connector which is itself unchanged, but whose CRTC changes its
713 	 * configuration. This must be done before calling mode_fixup in case a
714 	 * crtc only changed its mode but has the same set of connectors.
715 	 */
716 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
717 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
718 			continue;
719 
720 		drm_dbg_atomic(dev,
721 			       "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
722 			       crtc->base.id, crtc->name,
723 			       new_crtc_state->enable ? 'y' : 'n',
724 			       new_crtc_state->active ? 'y' : 'n');
725 
726 		ret = drm_atomic_add_affected_connectors(state, crtc);
727 		if (ret != 0)
728 			return ret;
729 
730 		ret = drm_atomic_add_affected_planes(state, crtc);
731 		if (ret != 0)
732 			return ret;
733 	}
734 
735 	/*
736 	 * Iterate over all connectors again, to make sure atomic_check()
737 	 * has been called on them when a modeset is forced.
738 	 */
739 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
740 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
741 
742 		if (connectors_mask & BIT(i))
743 			continue;
744 
745 		if (funcs->atomic_check)
746 			ret = funcs->atomic_check(connector, state);
747 		if (ret)
748 			return ret;
749 	}
750 
751 	/*
752 	 * Iterate over all connectors again, and add all affected bridges to
753 	 * the state.
754 	 */
755 	for_each_oldnew_connector_in_state(state, connector,
756 					   old_connector_state,
757 					   new_connector_state, i) {
758 		struct drm_encoder *encoder;
759 
760 		encoder = old_connector_state->best_encoder;
761 		ret = drm_atomic_add_encoder_bridges(state, encoder);
762 		if (ret)
763 			return ret;
764 
765 		encoder = new_connector_state->best_encoder;
766 		ret = drm_atomic_add_encoder_bridges(state, encoder);
767 		if (ret)
768 			return ret;
769 	}
770 
771 	ret = mode_valid(state);
772 	if (ret)
773 		return ret;
774 
775 	return mode_fixup(state);
776 }
777 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
778 
779 /**
780  * drm_atomic_helper_check_plane_state() - Check plane state for validity
781  * @plane_state: plane state to check
782  * @crtc_state: CRTC state to check
783  * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
784  * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
785  * @can_position: is it legal to position the plane such that it
786  *                doesn't cover the entire CRTC?  This will generally
787  *                only be false for primary planes.
788  * @can_update_disabled: can the plane be updated while the CRTC
789  *                       is disabled?
790  *
791  * Checks that a desired plane update is valid, and updates various
792  * bits of derived state (clipped coordinates etc.). Drivers that provide
793  * their own plane handling rather than helper-provided implementations may
794  * still wish to call this function to avoid duplication of error checking
795  * code.
796  *
797  * RETURNS:
798  * Zero if update appears valid, error code on failure
799  */
800 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
801 					const struct drm_crtc_state *crtc_state,
802 					int min_scale,
803 					int max_scale,
804 					bool can_position,
805 					bool can_update_disabled)
806 {
807 	struct drm_framebuffer *fb = plane_state->fb;
808 	struct drm_rect *src = &plane_state->src;
809 	struct drm_rect *dst = &plane_state->dst;
810 	unsigned int rotation = plane_state->rotation;
811 	struct drm_rect clip = {};
812 	int hscale, vscale;
813 
814 	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
815 
816 	*src = drm_plane_state_src(plane_state);
817 	*dst = drm_plane_state_dest(plane_state);
818 
819 	if (!fb) {
820 		plane_state->visible = false;
821 		return 0;
822 	}
823 
824 	/* crtc should only be NULL when disabling (i.e., !fb) */
825 	if (WARN_ON(!plane_state->crtc)) {
826 		plane_state->visible = false;
827 		return 0;
828 	}
829 
830 	if (!crtc_state->enable && !can_update_disabled) {
831 		drm_dbg_kms(plane_state->plane->dev,
832 			    "Cannot update plane of a disabled CRTC.\n");
833 		return -EINVAL;
834 	}
835 
836 	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
837 
838 	/* Check scaling */
839 	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
840 	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
841 	if (hscale < 0 || vscale < 0) {
842 		drm_dbg_kms(plane_state->plane->dev,
843 			    "Invalid scaling of plane\n");
844 		drm_rect_debug_print("src: ", &plane_state->src, true);
845 		drm_rect_debug_print("dst: ", &plane_state->dst, false);
846 		return -ERANGE;
847 	}
848 
849 	if (crtc_state->enable)
850 		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
851 
852 	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
853 
854 	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
855 
856 	if (!plane_state->visible)
857 		/*
858 		 * Plane isn't visible; some drivers can handle this
859 		 * so we just return success here.  Drivers that can't
860 		 * (including those that use the primary plane helper's
861 		 * update function) will return an error from their
862 		 * update_plane handler.
863 		 */
864 		return 0;
865 
866 	if (!can_position && !drm_rect_equals(dst, &clip)) {
867 		drm_dbg_kms(plane_state->plane->dev,
868 			    "Plane must cover entire CRTC\n");
869 		drm_rect_debug_print("dst: ", dst, false);
870 		drm_rect_debug_print("clip: ", &clip, false);
871 		return -EINVAL;
872 	}
873 
874 	return 0;
875 }
876 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
877 
878 /**
879  * drm_atomic_helper_check_planes - validate state object for planes changes
880  * @dev: DRM device
881  * @state: the driver state object
882  *
883  * Check the state object to see if the requested state is physically possible.
884  * This does all the plane update related checks using by calling into the
885  * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
886  * hooks provided by the driver.
887  *
888  * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
889  * updated planes.
890  *
891  * RETURNS:
892  * Zero for success or -errno
893  */
894 int
895 drm_atomic_helper_check_planes(struct drm_device *dev,
896 			       struct drm_atomic_state *state)
897 {
898 	struct drm_crtc *crtc;
899 	struct drm_crtc_state *new_crtc_state;
900 	struct drm_plane *plane;
901 	struct drm_plane_state *new_plane_state, *old_plane_state;
902 	int i, ret = 0;
903 
904 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
905 		const struct drm_plane_helper_funcs *funcs;
906 
907 		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
908 
909 		funcs = plane->helper_private;
910 
911 		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
912 
913 		drm_atomic_helper_check_plane_damage(state, new_plane_state);
914 
915 		if (!funcs || !funcs->atomic_check)
916 			continue;
917 
918 		ret = funcs->atomic_check(plane, state);
919 		if (ret) {
920 			drm_dbg_atomic(plane->dev,
921 				       "[PLANE:%d:%s] atomic driver check failed\n",
922 				       plane->base.id, plane->name);
923 			return ret;
924 		}
925 	}
926 
927 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
928 		const struct drm_crtc_helper_funcs *funcs;
929 
930 		funcs = crtc->helper_private;
931 
932 		if (!funcs || !funcs->atomic_check)
933 			continue;
934 
935 		ret = funcs->atomic_check(crtc, state);
936 		if (ret) {
937 			drm_dbg_atomic(crtc->dev,
938 				       "[CRTC:%d:%s] atomic driver check failed\n",
939 				       crtc->base.id, crtc->name);
940 			return ret;
941 		}
942 	}
943 
944 	return ret;
945 }
946 EXPORT_SYMBOL(drm_atomic_helper_check_planes);
947 
948 /**
949  * drm_atomic_helper_check - validate state object
950  * @dev: DRM device
951  * @state: the driver state object
952  *
953  * Check the state object to see if the requested state is physically possible.
954  * Only CRTCs and planes have check callbacks, so for any additional (global)
955  * checking that a driver needs it can simply wrap that around this function.
956  * Drivers without such needs can directly use this as their
957  * &drm_mode_config_funcs.atomic_check callback.
958  *
959  * This just wraps the two parts of the state checking for planes and modeset
960  * state in the default order: First it calls drm_atomic_helper_check_modeset()
961  * and then drm_atomic_helper_check_planes(). The assumption is that the
962  * @drm_plane_helper_funcs.atomic_check and @drm_crtc_helper_funcs.atomic_check
963  * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
964  * watermarks.
965  *
966  * Note that zpos normalization will add all enable planes to the state which
967  * might not desired for some drivers.
968  * For example enable/disable of a cursor plane which have fixed zpos value
969  * would trigger all other enabled planes to be forced to the state change.
970  *
971  * RETURNS:
972  * Zero for success or -errno
973  */
974 int drm_atomic_helper_check(struct drm_device *dev,
975 			    struct drm_atomic_state *state)
976 {
977 	int ret;
978 
979 	ret = drm_atomic_helper_check_modeset(dev, state);
980 	if (ret)
981 		return ret;
982 
983 	if (dev->mode_config.normalize_zpos) {
984 		ret = drm_atomic_normalize_zpos(dev, state);
985 		if (ret)
986 			return ret;
987 	}
988 
989 	ret = drm_atomic_helper_check_planes(dev, state);
990 	if (ret)
991 		return ret;
992 
993 	if (state->legacy_cursor_update)
994 		state->async_update = !drm_atomic_helper_async_check(dev, state);
995 
996 	drm_self_refresh_helper_alter_state(state);
997 
998 	return ret;
999 }
1000 EXPORT_SYMBOL(drm_atomic_helper_check);
1001 
1002 static bool
1003 crtc_needs_disable(struct drm_crtc_state *old_state,
1004 		   struct drm_crtc_state *new_state)
1005 {
1006 	/*
1007 	 * No new_state means the CRTC is off, so the only criteria is whether
1008 	 * it's currently active or in self refresh mode.
1009 	 */
1010 	if (!new_state)
1011 		return drm_atomic_crtc_effectively_active(old_state);
1012 
1013 	/*
1014 	 * We need to run through the crtc_funcs->disable() function if the CRTC
1015 	 * is currently on, if it's transitioning to self refresh mode, or if
1016 	 * it's in self refresh mode and needs to be fully disabled.
1017 	 */
1018 	return old_state->active ||
1019 	       (old_state->self_refresh_active && !new_state->active) ||
1020 	       new_state->self_refresh_active;
1021 }
1022 
1023 static void
1024 disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
1025 {
1026 	struct drm_connector *connector;
1027 	struct drm_connector_state *old_conn_state, *new_conn_state;
1028 	struct drm_crtc *crtc;
1029 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1030 	int i;
1031 
1032 	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1033 		const struct drm_encoder_helper_funcs *funcs;
1034 		struct drm_encoder *encoder;
1035 		struct drm_bridge *bridge;
1036 
1037 		/*
1038 		 * Shut down everything that's in the changeset and currently
1039 		 * still on. So need to check the old, saved state.
1040 		 */
1041 		if (!old_conn_state->crtc)
1042 			continue;
1043 
1044 		old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
1045 
1046 		if (new_conn_state->crtc)
1047 			new_crtc_state = drm_atomic_get_new_crtc_state(
1048 						old_state,
1049 						new_conn_state->crtc);
1050 		else
1051 			new_crtc_state = NULL;
1052 
1053 		if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1054 		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1055 			continue;
1056 
1057 		encoder = old_conn_state->best_encoder;
1058 
1059 		/* We shouldn't get this far if we didn't previously have
1060 		 * an encoder.. but WARN_ON() rather than explode.
1061 		 */
1062 		if (WARN_ON(!encoder))
1063 			continue;
1064 
1065 		funcs = encoder->helper_private;
1066 
1067 		drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n",
1068 			       encoder->base.id, encoder->name);
1069 
1070 		/*
1071 		 * Each encoder has at most one connector (since we always steal
1072 		 * it away), so we won't call disable hooks twice.
1073 		 */
1074 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1075 		drm_atomic_bridge_chain_disable(bridge, old_state);
1076 
1077 		/* Right function depends upon target state. */
1078 		if (funcs) {
1079 			if (funcs->atomic_disable)
1080 				funcs->atomic_disable(encoder, old_state);
1081 			else if (new_conn_state->crtc && funcs->prepare)
1082 				funcs->prepare(encoder);
1083 			else if (funcs->disable)
1084 				funcs->disable(encoder);
1085 			else if (funcs->dpms)
1086 				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
1087 		}
1088 
1089 		drm_atomic_bridge_chain_post_disable(bridge, old_state);
1090 	}
1091 
1092 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1093 		const struct drm_crtc_helper_funcs *funcs;
1094 		int ret;
1095 
1096 		/* Shut down everything that needs a full modeset. */
1097 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1098 			continue;
1099 
1100 		if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
1101 			continue;
1102 
1103 		funcs = crtc->helper_private;
1104 
1105 		drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
1106 			       crtc->base.id, crtc->name);
1107 
1108 
1109 		/* Right function depends upon target state. */
1110 		if (new_crtc_state->enable && funcs->prepare)
1111 			funcs->prepare(crtc);
1112 		else if (funcs->atomic_disable)
1113 			funcs->atomic_disable(crtc, old_state);
1114 		else if (funcs->disable)
1115 			funcs->disable(crtc);
1116 		else if (funcs->dpms)
1117 			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1118 
1119 		if (!drm_dev_has_vblank(dev))
1120 			continue;
1121 
1122 		ret = drm_crtc_vblank_get(crtc);
1123 		WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
1124 		if (ret == 0)
1125 			drm_crtc_vblank_put(crtc);
1126 	}
1127 }
1128 
1129 /**
1130  * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1131  * @dev: DRM device
1132  * @old_state: atomic state object with old state structures
1133  *
1134  * This function updates all the various legacy modeset state pointers in
1135  * connectors, encoders and CRTCs.
1136  *
1137  * Drivers can use this for building their own atomic commit if they don't have
1138  * a pure helper-based modeset implementation.
1139  *
1140  * Since these updates are not synchronized with lockings, only code paths
1141  * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
1142  * legacy state filled out by this helper. Defacto this means this helper and
1143  * the legacy state pointers are only really useful for transitioning an
1144  * existing driver to the atomic world.
1145  */
1146 void
1147 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1148 					      struct drm_atomic_state *old_state)
1149 {
1150 	struct drm_connector *connector;
1151 	struct drm_connector_state *old_conn_state, *new_conn_state;
1152 	struct drm_crtc *crtc;
1153 	struct drm_crtc_state *new_crtc_state;
1154 	int i;
1155 
1156 	/* clear out existing links and update dpms */
1157 	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
1158 		if (connector->encoder) {
1159 			WARN_ON(!connector->encoder->crtc);
1160 
1161 			connector->encoder->crtc = NULL;
1162 			connector->encoder = NULL;
1163 		}
1164 
1165 		crtc = new_conn_state->crtc;
1166 		if ((!crtc && old_conn_state->crtc) ||
1167 		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1168 			int mode = DRM_MODE_DPMS_OFF;
1169 
1170 			if (crtc && crtc->state->active)
1171 				mode = DRM_MODE_DPMS_ON;
1172 
1173 			connector->dpms = mode;
1174 		}
1175 	}
1176 
1177 	/* set new links */
1178 	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1179 		if (!new_conn_state->crtc)
1180 			continue;
1181 
1182 		if (WARN_ON(!new_conn_state->best_encoder))
1183 			continue;
1184 
1185 		connector->encoder = new_conn_state->best_encoder;
1186 		connector->encoder->crtc = new_conn_state->crtc;
1187 	}
1188 
1189 	/* set legacy state in the crtc structure */
1190 	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1191 		struct drm_plane *primary = crtc->primary;
1192 		struct drm_plane_state *new_plane_state;
1193 
1194 		crtc->mode = new_crtc_state->mode;
1195 		crtc->enabled = new_crtc_state->enable;
1196 
1197 		new_plane_state =
1198 			drm_atomic_get_new_plane_state(old_state, primary);
1199 
1200 		if (new_plane_state && new_plane_state->crtc == crtc) {
1201 			crtc->x = new_plane_state->src_x >> 16;
1202 			crtc->y = new_plane_state->src_y >> 16;
1203 		}
1204 	}
1205 }
1206 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1207 
1208 /**
1209  * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
1210  * @state: atomic state object
1211  *
1212  * Updates the timestamping constants used for precise vblank timestamps
1213  * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
1214  */
1215 void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
1216 {
1217 	struct drm_crtc_state *new_crtc_state;
1218 	struct drm_crtc *crtc;
1219 	int i;
1220 
1221 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1222 		if (new_crtc_state->enable)
1223 			drm_calc_timestamping_constants(crtc,
1224 							&new_crtc_state->adjusted_mode);
1225 	}
1226 }
1227 EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
1228 
1229 static void
1230 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
1231 {
1232 	struct drm_crtc *crtc;
1233 	struct drm_crtc_state *new_crtc_state;
1234 	struct drm_connector *connector;
1235 	struct drm_connector_state *new_conn_state;
1236 	int i;
1237 
1238 	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
1239 		const struct drm_crtc_helper_funcs *funcs;
1240 
1241 		if (!new_crtc_state->mode_changed)
1242 			continue;
1243 
1244 		funcs = crtc->helper_private;
1245 
1246 		if (new_crtc_state->enable && funcs->mode_set_nofb) {
1247 			drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n",
1248 				       crtc->base.id, crtc->name);
1249 
1250 			funcs->mode_set_nofb(crtc);
1251 		}
1252 	}
1253 
1254 	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1255 		const struct drm_encoder_helper_funcs *funcs;
1256 		struct drm_encoder *encoder;
1257 		struct drm_display_mode *mode, *adjusted_mode;
1258 		struct drm_bridge *bridge;
1259 
1260 		if (!new_conn_state->best_encoder)
1261 			continue;
1262 
1263 		encoder = new_conn_state->best_encoder;
1264 		funcs = encoder->helper_private;
1265 		new_crtc_state = new_conn_state->crtc->state;
1266 		mode = &new_crtc_state->mode;
1267 		adjusted_mode = &new_crtc_state->adjusted_mode;
1268 
1269 		if (!new_crtc_state->mode_changed)
1270 			continue;
1271 
1272 		drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
1273 			       encoder->base.id, encoder->name);
1274 
1275 		/*
1276 		 * Each encoder has at most one connector (since we always steal
1277 		 * it away), so we won't call mode_set hooks twice.
1278 		 */
1279 		if (funcs && funcs->atomic_mode_set) {
1280 			funcs->atomic_mode_set(encoder, new_crtc_state,
1281 					       new_conn_state);
1282 		} else if (funcs && funcs->mode_set) {
1283 			funcs->mode_set(encoder, mode, adjusted_mode);
1284 		}
1285 
1286 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1287 		drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
1288 	}
1289 }
1290 
1291 /**
1292  * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1293  * @dev: DRM device
1294  * @old_state: atomic state object with old state structures
1295  *
1296  * This function shuts down all the outputs that need to be shut down and
1297  * prepares them (if required) with the new mode.
1298  *
1299  * For compatibility with legacy CRTC helpers this should be called before
1300  * drm_atomic_helper_commit_planes(), which is what the default commit function
1301  * does. But drivers with different needs can group the modeset commits together
1302  * and do the plane commits at the end. This is useful for drivers doing runtime
1303  * PM since planes updates then only happen when the CRTC is actually enabled.
1304  */
1305 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
1306 					       struct drm_atomic_state *old_state)
1307 {
1308 	disable_outputs(dev, old_state);
1309 
1310 	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
1311 	drm_atomic_helper_calc_timestamping_constants(old_state);
1312 
1313 	crtc_set_mode(dev, old_state);
1314 }
1315 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1316 
1317 static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1318 						struct drm_atomic_state *old_state)
1319 {
1320 	struct drm_connector *connector;
1321 	struct drm_connector_state *new_conn_state;
1322 	int i;
1323 
1324 	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1325 		const struct drm_connector_helper_funcs *funcs;
1326 
1327 		funcs = connector->helper_private;
1328 		if (!funcs->atomic_commit)
1329 			continue;
1330 
1331 		if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1332 			WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1333 			funcs->atomic_commit(connector, old_state);
1334 		}
1335 	}
1336 }
1337 
1338 /**
1339  * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1340  * @dev: DRM device
1341  * @old_state: atomic state object with old state structures
1342  *
1343  * This function enables all the outputs with the new configuration which had to
1344  * be turned off for the update.
1345  *
1346  * For compatibility with legacy CRTC helpers this should be called after
1347  * drm_atomic_helper_commit_planes(), which is what the default commit function
1348  * does. But drivers with different needs can group the modeset commits together
1349  * and do the plane commits at the end. This is useful for drivers doing runtime
1350  * PM since planes updates then only happen when the CRTC is actually enabled.
1351  */
1352 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1353 					      struct drm_atomic_state *old_state)
1354 {
1355 	struct drm_crtc *crtc;
1356 	struct drm_crtc_state *old_crtc_state;
1357 	struct drm_crtc_state *new_crtc_state;
1358 	struct drm_connector *connector;
1359 	struct drm_connector_state *new_conn_state;
1360 	int i;
1361 
1362 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1363 		const struct drm_crtc_helper_funcs *funcs;
1364 
1365 		/* Need to filter out CRTCs where only planes change. */
1366 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1367 			continue;
1368 
1369 		if (!new_crtc_state->active)
1370 			continue;
1371 
1372 		funcs = crtc->helper_private;
1373 
1374 		if (new_crtc_state->enable) {
1375 			drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
1376 				       crtc->base.id, crtc->name);
1377 			if (funcs->atomic_enable)
1378 				funcs->atomic_enable(crtc, old_state);
1379 			else if (funcs->commit)
1380 				funcs->commit(crtc);
1381 		}
1382 	}
1383 
1384 	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
1385 		const struct drm_encoder_helper_funcs *funcs;
1386 		struct drm_encoder *encoder;
1387 		struct drm_bridge *bridge;
1388 
1389 		if (!new_conn_state->best_encoder)
1390 			continue;
1391 
1392 		if (!new_conn_state->crtc->state->active ||
1393 		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1394 			continue;
1395 
1396 		encoder = new_conn_state->best_encoder;
1397 		funcs = encoder->helper_private;
1398 
1399 		drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n",
1400 			       encoder->base.id, encoder->name);
1401 
1402 		/*
1403 		 * Each encoder has at most one connector (since we always steal
1404 		 * it away), so we won't call enable hooks twice.
1405 		 */
1406 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1407 		drm_atomic_bridge_chain_pre_enable(bridge, old_state);
1408 
1409 		if (funcs) {
1410 			if (funcs->atomic_enable)
1411 				funcs->atomic_enable(encoder, old_state);
1412 			else if (funcs->enable)
1413 				funcs->enable(encoder);
1414 			else if (funcs->commit)
1415 				funcs->commit(encoder);
1416 		}
1417 
1418 		drm_atomic_bridge_chain_enable(bridge, old_state);
1419 	}
1420 
1421 	drm_atomic_helper_commit_writebacks(dev, old_state);
1422 }
1423 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1424 
1425 /**
1426  * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1427  * @dev: DRM device
1428  * @state: atomic state object with old state structures
1429  * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1430  *	Otherwise @state is the old state.
1431  *
1432  * For implicit sync, driver should fish the exclusive fence out from the
1433  * incoming fb's and stash it in the drm_plane_state.  This is called after
1434  * drm_atomic_helper_swap_state() so it uses the current plane state (and
1435  * just uses the atomic state to find the changed planes)
1436  *
1437  * Note that @pre_swap is needed since the point where we block for fences moves
1438  * around depending upon whether an atomic commit is blocking or
1439  * non-blocking. For non-blocking commit all waiting needs to happen after
1440  * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1441  * to wait **before** we do anything that can't be easily rolled back. That is
1442  * before we call drm_atomic_helper_swap_state().
1443  *
1444  * Returns zero if success or < 0 if dma_fence_wait() fails.
1445  */
1446 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1447 				      struct drm_atomic_state *state,
1448 				      bool pre_swap)
1449 {
1450 	struct drm_plane *plane;
1451 	struct drm_plane_state *new_plane_state;
1452 	int i, ret;
1453 
1454 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1455 		if (!new_plane_state->fence)
1456 			continue;
1457 
1458 		WARN_ON(!new_plane_state->fb);
1459 
1460 		/*
1461 		 * If waiting for fences pre-swap (ie: nonblock), userspace can
1462 		 * still interrupt the operation. Instead of blocking until the
1463 		 * timer expires, make the wait interruptible.
1464 		 */
1465 		ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1466 		if (ret)
1467 			return ret;
1468 
1469 		dma_fence_put(new_plane_state->fence);
1470 		new_plane_state->fence = NULL;
1471 	}
1472 
1473 	return 0;
1474 }
1475 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
1476 
1477 /**
1478  * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
1479  * @dev: DRM device
1480  * @old_state: atomic state object with old state structures
1481  *
1482  * Helper to, after atomic commit, wait for vblanks on all affected
1483  * CRTCs (ie. before cleaning up old framebuffers using
1484  * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1485  * framebuffers have actually changed to optimize for the legacy cursor and
1486  * plane update use-case.
1487  *
1488  * Drivers using the nonblocking commit tracking support initialized by calling
1489  * drm_atomic_helper_setup_commit() should look at
1490  * drm_atomic_helper_wait_for_flip_done() as an alternative.
1491  */
1492 void
1493 drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1494 		struct drm_atomic_state *old_state)
1495 {
1496 	struct drm_crtc *crtc;
1497 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1498 	int i, ret;
1499 	unsigned int crtc_mask = 0;
1500 
1501 	 /*
1502 	  * Legacy cursor ioctls are completely unsynced, and userspace
1503 	  * relies on that (by doing tons of cursor updates).
1504 	  */
1505 	if (old_state->legacy_cursor_update)
1506 		return;
1507 
1508 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
1509 		if (!new_crtc_state->active)
1510 			continue;
1511 
1512 		ret = drm_crtc_vblank_get(crtc);
1513 		if (ret != 0)
1514 			continue;
1515 
1516 		crtc_mask |= drm_crtc_mask(crtc);
1517 		old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1518 	}
1519 
1520 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1521 		if (!(crtc_mask & drm_crtc_mask(crtc)))
1522 			continue;
1523 
1524 		ret = wait_event_timeout(dev->vblank[i].queue,
1525 				old_state->crtcs[i].last_vblank_count !=
1526 					drm_crtc_vblank_count(crtc),
1527 				msecs_to_jiffies(100));
1528 
1529 		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1530 		     crtc->base.id, crtc->name);
1531 
1532 		drm_crtc_vblank_put(crtc);
1533 	}
1534 }
1535 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1536 
1537 /**
1538  * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1539  * @dev: DRM device
1540  * @old_state: atomic state object with old state structures
1541  *
1542  * Helper to, after atomic commit, wait for page flips on all affected
1543  * crtcs (ie. before cleaning up old framebuffers using
1544  * drm_atomic_helper_cleanup_planes()). Compared to
1545  * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
1546  * CRTCs, assuming that cursors-only updates are signalling their completion
1547  * immediately (or using a different path).
1548  *
1549  * This requires that drivers use the nonblocking commit tracking support
1550  * initialized using drm_atomic_helper_setup_commit().
1551  */
1552 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1553 					  struct drm_atomic_state *old_state)
1554 {
1555 	struct drm_crtc *crtc;
1556 	int i;
1557 
1558 	for (i = 0; i < dev->mode_config.num_crtc; i++) {
1559 		struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
1560 		int ret;
1561 
1562 		crtc = old_state->crtcs[i].ptr;
1563 
1564 		if (!crtc || !commit)
1565 			continue;
1566 
1567 		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1568 		if (ret == 0)
1569 			drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n",
1570 				crtc->base.id, crtc->name);
1571 	}
1572 
1573 	if (old_state->fake_commit)
1574 		complete_all(&old_state->fake_commit->flip_done);
1575 }
1576 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1577 
1578 /**
1579  * drm_atomic_helper_commit_tail - commit atomic update to hardware
1580  * @old_state: atomic state object with old state structures
1581  *
1582  * This is the default implementation for the
1583  * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1584  * that do not support runtime_pm or do not need the CRTC to be
1585  * enabled to perform a commit. Otherwise, see
1586  * drm_atomic_helper_commit_tail_rpm().
1587  *
1588  * Note that the default ordering of how the various stages are called is to
1589  * match the legacy modeset helper library closest.
1590  */
1591 void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
1592 {
1593 	struct drm_device *dev = old_state->dev;
1594 
1595 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
1596 
1597 	drm_atomic_helper_commit_planes(dev, old_state, 0);
1598 
1599 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
1600 
1601 	drm_atomic_helper_fake_vblank(old_state);
1602 
1603 	drm_atomic_helper_commit_hw_done(old_state);
1604 
1605 	drm_atomic_helper_wait_for_vblanks(dev, old_state);
1606 
1607 	drm_atomic_helper_cleanup_planes(dev, old_state);
1608 }
1609 EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1610 
1611 /**
1612  * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1613  * @old_state: new modeset state to be committed
1614  *
1615  * This is an alternative implementation for the
1616  * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1617  * that support runtime_pm or need the CRTC to be enabled to perform a
1618  * commit. Otherwise, one should use the default implementation
1619  * drm_atomic_helper_commit_tail().
1620  */
1621 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
1622 {
1623 	struct drm_device *dev = old_state->dev;
1624 
1625 	drm_atomic_helper_commit_modeset_disables(dev, old_state);
1626 
1627 	drm_atomic_helper_commit_modeset_enables(dev, old_state);
1628 
1629 	drm_atomic_helper_commit_planes(dev, old_state,
1630 					DRM_PLANE_COMMIT_ACTIVE_ONLY);
1631 
1632 	drm_atomic_helper_fake_vblank(old_state);
1633 
1634 	drm_atomic_helper_commit_hw_done(old_state);
1635 
1636 	drm_atomic_helper_wait_for_vblanks(dev, old_state);
1637 
1638 	drm_atomic_helper_cleanup_planes(dev, old_state);
1639 }
1640 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
1641 
1642 static void commit_tail(struct drm_atomic_state *old_state)
1643 {
1644 	struct drm_device *dev = old_state->dev;
1645 	const struct drm_mode_config_helper_funcs *funcs;
1646 	struct drm_crtc_state *new_crtc_state;
1647 	struct drm_crtc *crtc;
1648 	ktime_t start;
1649 	s64 commit_time_ms;
1650 	unsigned int i, new_self_refresh_mask = 0;
1651 
1652 	funcs = dev->mode_config.helper_private;
1653 
1654 	/*
1655 	 * We're measuring the _entire_ commit, so the time will vary depending
1656 	 * on how many fences and objects are involved. For the purposes of self
1657 	 * refresh, this is desirable since it'll give us an idea of how
1658 	 * congested things are. This will inform our decision on how often we
1659 	 * should enter self refresh after idle.
1660 	 *
1661 	 * These times will be averaged out in the self refresh helpers to avoid
1662 	 * overreacting over one outlier frame
1663 	 */
1664 	start = ktime_get();
1665 
1666 	drm_atomic_helper_wait_for_fences(dev, old_state, false);
1667 
1668 	drm_atomic_helper_wait_for_dependencies(old_state);
1669 
1670 	/*
1671 	 * We cannot safely access new_crtc_state after
1672 	 * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
1673 	 * self-refresh active beforehand:
1674 	 */
1675 	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
1676 		if (new_crtc_state->self_refresh_active)
1677 			new_self_refresh_mask |= BIT(i);
1678 
1679 	if (funcs && funcs->atomic_commit_tail)
1680 		funcs->atomic_commit_tail(old_state);
1681 	else
1682 		drm_atomic_helper_commit_tail(old_state);
1683 
1684 	commit_time_ms = ktime_ms_delta(ktime_get(), start);
1685 	if (commit_time_ms > 0)
1686 		drm_self_refresh_helper_update_avg_times(old_state,
1687 						 (unsigned long)commit_time_ms,
1688 						 new_self_refresh_mask);
1689 
1690 	drm_atomic_helper_commit_cleanup_done(old_state);
1691 
1692 	drm_atomic_state_put(old_state);
1693 }
1694 
1695 static void commit_work(struct work_struct *work)
1696 {
1697 	struct drm_atomic_state *state = container_of(work,
1698 						      struct drm_atomic_state,
1699 						      commit_work);
1700 	commit_tail(state);
1701 }
1702 
1703 /**
1704  * drm_atomic_helper_async_check - check if state can be committed asynchronously
1705  * @dev: DRM device
1706  * @state: the driver state object
1707  *
1708  * This helper will check if it is possible to commit the state asynchronously.
1709  * Async commits are not supposed to swap the states like normal sync commits
1710  * but just do in-place changes on the current state.
1711  *
1712  * It will return 0 if the commit can happen in an asynchronous fashion or error
1713  * if not. Note that error just mean it can't be committed asynchronously, if it
1714  * fails the commit should be treated like a normal synchronous commit.
1715  */
1716 int drm_atomic_helper_async_check(struct drm_device *dev,
1717 				   struct drm_atomic_state *state)
1718 {
1719 	struct drm_crtc *crtc;
1720 	struct drm_crtc_state *crtc_state;
1721 	struct drm_plane *plane = NULL;
1722 	struct drm_plane_state *old_plane_state = NULL;
1723 	struct drm_plane_state *new_plane_state = NULL;
1724 	const struct drm_plane_helper_funcs *funcs;
1725 	int i, n_planes = 0;
1726 
1727 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1728 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1729 			return -EINVAL;
1730 	}
1731 
1732 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
1733 		n_planes++;
1734 
1735 	/* FIXME: we support only single plane updates for now */
1736 	if (n_planes != 1)
1737 		return -EINVAL;
1738 
1739 	if (!new_plane_state->crtc ||
1740 	    old_plane_state->crtc != new_plane_state->crtc)
1741 		return -EINVAL;
1742 
1743 	funcs = plane->helper_private;
1744 	if (!funcs->atomic_async_update)
1745 		return -EINVAL;
1746 
1747 	if (new_plane_state->fence)
1748 		return -EINVAL;
1749 
1750 	/*
1751 	 * Don't do an async update if there is an outstanding commit modifying
1752 	 * the plane.  This prevents our async update's changes from getting
1753 	 * overridden by a previous synchronous update's state.
1754 	 */
1755 	if (old_plane_state->commit &&
1756 	    !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
1757 		drm_dbg_atomic(dev,
1758 			       "[PLANE:%d:%s] inflight previous commit preventing async commit\n",
1759 			       plane->base.id, plane->name);
1760 		return -EBUSY;
1761 	}
1762 
1763 	return funcs->atomic_async_check(plane, state);
1764 }
1765 EXPORT_SYMBOL(drm_atomic_helper_async_check);
1766 
1767 /**
1768  * drm_atomic_helper_async_commit - commit state asynchronously
1769  * @dev: DRM device
1770  * @state: the driver state object
1771  *
1772  * This function commits a state asynchronously, i.e., not vblank
1773  * synchronized. It should be used on a state only when
1774  * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
1775  * the states like normal sync commits, but just do in-place changes on the
1776  * current state.
1777  *
1778  * TODO: Implement full swap instead of doing in-place changes.
1779  */
1780 void drm_atomic_helper_async_commit(struct drm_device *dev,
1781 				    struct drm_atomic_state *state)
1782 {
1783 	struct drm_plane *plane;
1784 	struct drm_plane_state *plane_state;
1785 	const struct drm_plane_helper_funcs *funcs;
1786 	int i;
1787 
1788 	for_each_new_plane_in_state(state, plane, plane_state, i) {
1789 		struct drm_framebuffer *new_fb = plane_state->fb;
1790 		struct drm_framebuffer *old_fb = plane->state->fb;
1791 
1792 		funcs = plane->helper_private;
1793 		funcs->atomic_async_update(plane, state);
1794 
1795 		/*
1796 		 * ->atomic_async_update() is supposed to update the
1797 		 * plane->state in-place, make sure at least common
1798 		 * properties have been properly updated.
1799 		 */
1800 		WARN_ON_ONCE(plane->state->fb != new_fb);
1801 		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
1802 		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
1803 		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
1804 		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
1805 
1806 		/*
1807 		 * Make sure the FBs have been swapped so that cleanups in the
1808 		 * new_state performs a cleanup in the old FB.
1809 		 */
1810 		WARN_ON_ONCE(plane_state->fb != old_fb);
1811 	}
1812 }
1813 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
1814 
1815 /**
1816  * drm_atomic_helper_commit - commit validated state object
1817  * @dev: DRM device
1818  * @state: the driver state object
1819  * @nonblock: whether nonblocking behavior is requested.
1820  *
1821  * This function commits a with drm_atomic_helper_check() pre-validated state
1822  * object. This can still fail when e.g. the framebuffer reservation fails. This
1823  * function implements nonblocking commits, using
1824  * drm_atomic_helper_setup_commit() and related functions.
1825  *
1826  * Committing the actual hardware state is done through the
1827  * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
1828  * implementation drm_atomic_helper_commit_tail().
1829  *
1830  * RETURNS:
1831  * Zero for success or -errno.
1832  */
1833 int drm_atomic_helper_commit(struct drm_device *dev,
1834 			     struct drm_atomic_state *state,
1835 			     bool nonblock)
1836 {
1837 	int ret;
1838 
1839 	if (state->async_update) {
1840 		ret = drm_atomic_helper_prepare_planes(dev, state);
1841 		if (ret)
1842 			return ret;
1843 
1844 		drm_atomic_helper_async_commit(dev, state);
1845 		drm_atomic_helper_cleanup_planes(dev, state);
1846 
1847 		return 0;
1848 	}
1849 
1850 	ret = drm_atomic_helper_setup_commit(state, nonblock);
1851 	if (ret)
1852 		return ret;
1853 
1854 	INIT_WORK(&state->commit_work, commit_work);
1855 
1856 	ret = drm_atomic_helper_prepare_planes(dev, state);
1857 	if (ret)
1858 		return ret;
1859 
1860 	if (!nonblock) {
1861 		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
1862 		if (ret)
1863 			goto err;
1864 	}
1865 
1866 	/*
1867 	 * This is the point of no return - everything below never fails except
1868 	 * when the hw goes bonghits. Which means we can commit the new state on
1869 	 * the software side now.
1870 	 */
1871 
1872 	ret = drm_atomic_helper_swap_state(state, true);
1873 	if (ret)
1874 		goto err;
1875 
1876 	/*
1877 	 * Everything below can be run asynchronously without the need to grab
1878 	 * any modeset locks at all under one condition: It must be guaranteed
1879 	 * that the asynchronous work has either been cancelled (if the driver
1880 	 * supports it, which at least requires that the framebuffers get
1881 	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
1882 	 * before the new state gets committed on the software side with
1883 	 * drm_atomic_helper_swap_state().
1884 	 *
1885 	 * This scheme allows new atomic state updates to be prepared and
1886 	 * checked in parallel to the asynchronous completion of the previous
1887 	 * update. Which is important since compositors need to figure out the
1888 	 * composition of the next frame right after having submitted the
1889 	 * current layout.
1890 	 *
1891 	 * NOTE: Commit work has multiple phases, first hardware commit, then
1892 	 * cleanup. We want them to overlap, hence need system_unbound_wq to
1893 	 * make sure work items don't artificially stall on each another.
1894 	 */
1895 
1896 	drm_atomic_state_get(state);
1897 	if (nonblock)
1898 		queue_work(system_unbound_wq, &state->commit_work);
1899 	else
1900 		commit_tail(state);
1901 
1902 	return 0;
1903 
1904 err:
1905 	drm_atomic_helper_cleanup_planes(dev, state);
1906 	return ret;
1907 }
1908 EXPORT_SYMBOL(drm_atomic_helper_commit);
1909 
1910 /**
1911  * DOC: implementing nonblocking commit
1912  *
1913  * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
1914  * different operations against each another. Locks, especially struct
1915  * &drm_modeset_lock, should not be held in worker threads or any other
1916  * asynchronous context used to commit the hardware state.
1917  *
1918  * drm_atomic_helper_commit() implements the recommended sequence for
1919  * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
1920  *
1921  * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
1922  * need to propagate out of memory/VRAM errors to userspace, it must be called
1923  * synchronously.
1924  *
1925  * 2. Synchronize with any outstanding nonblocking commit worker threads which
1926  * might be affected by the new state update. This is handled by
1927  * drm_atomic_helper_setup_commit().
1928  *
1929  * Asynchronous workers need to have sufficient parallelism to be able to run
1930  * different atomic commits on different CRTCs in parallel. The simplest way to
1931  * achieve this is by running them on the &system_unbound_wq work queue. Note
1932  * that drivers are not required to split up atomic commits and run an
1933  * individual commit in parallel - userspace is supposed to do that if it cares.
1934  * But it might be beneficial to do that for modesets, since those necessarily
1935  * must be done as one global operation, and enabling or disabling a CRTC can
1936  * take a long time. But even that is not required.
1937  *
1938  * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
1939  * against all CRTCs therein. Therefore for atomic state updates which only flip
1940  * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
1941  * in its atomic check code: This would prevent committing of atomic updates to
1942  * multiple CRTCs in parallel. In general, adding additional state structures
1943  * should be avoided as much as possible, because this reduces parallelism in
1944  * (nonblocking) commits, both due to locking and due to commit sequencing
1945  * requirements.
1946  *
1947  * 3. The software state is updated synchronously with
1948  * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
1949  * locks means concurrent callers never see inconsistent state. Note that commit
1950  * workers do not hold any locks; their access is only coordinated through
1951  * ordering. If workers would access state only through the pointers in the
1952  * free-standing state objects (currently not the case for any driver) then even
1953  * multiple pending commits could be in-flight at the same time.
1954  *
1955  * 4. Schedule a work item to do all subsequent steps, using the split-out
1956  * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1957  * then cleaning up the framebuffers after the old framebuffer is no longer
1958  * being displayed. The scheduled work should synchronize against other workers
1959  * using the &drm_crtc_commit infrastructure as needed. See
1960  * drm_atomic_helper_setup_commit() for more details.
1961  */
1962 
1963 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1964 {
1965 	struct drm_crtc_commit *commit, *stall_commit = NULL;
1966 	bool completed = true;
1967 	int i;
1968 	long ret = 0;
1969 
1970 	spin_lock(&crtc->commit_lock);
1971 	i = 0;
1972 	list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1973 		if (i == 0) {
1974 			completed = try_wait_for_completion(&commit->flip_done);
1975 			/*
1976 			 * Userspace is not allowed to get ahead of the previous
1977 			 * commit with nonblocking ones.
1978 			 */
1979 			if (!completed && nonblock) {
1980 				spin_unlock(&crtc->commit_lock);
1981 				drm_dbg_atomic(crtc->dev,
1982 					       "[CRTC:%d:%s] busy with a previous commit\n",
1983 					       crtc->base.id, crtc->name);
1984 
1985 				return -EBUSY;
1986 			}
1987 		} else if (i == 1) {
1988 			stall_commit = drm_crtc_commit_get(commit);
1989 			break;
1990 		}
1991 
1992 		i++;
1993 	}
1994 	spin_unlock(&crtc->commit_lock);
1995 
1996 	if (!stall_commit)
1997 		return 0;
1998 
1999 	/* We don't want to let commits get ahead of cleanup work too much,
2000 	 * stalling on 2nd previous commit means triple-buffer won't ever stall.
2001 	 */
2002 	ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
2003 							10*HZ);
2004 	if (ret == 0)
2005 		drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n",
2006 			crtc->base.id, crtc->name);
2007 
2008 	drm_crtc_commit_put(stall_commit);
2009 
2010 	return ret < 0 ? ret : 0;
2011 }
2012 
2013 static void release_crtc_commit(struct completion *completion)
2014 {
2015 	struct drm_crtc_commit *commit = container_of(completion,
2016 						      typeof(*commit),
2017 						      flip_done);
2018 
2019 	drm_crtc_commit_put(commit);
2020 }
2021 
2022 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
2023 {
2024 	init_completion(&commit->flip_done);
2025 	init_completion(&commit->hw_done);
2026 	init_completion(&commit->cleanup_done);
2027 	INIT_LIST_HEAD(&commit->commit_entry);
2028 	kref_init(&commit->ref);
2029 	commit->crtc = crtc;
2030 }
2031 
2032 static struct drm_crtc_commit *
2033 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
2034 {
2035 	if (crtc) {
2036 		struct drm_crtc_state *new_crtc_state;
2037 
2038 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
2039 
2040 		return new_crtc_state->commit;
2041 	}
2042 
2043 	if (!state->fake_commit) {
2044 		state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
2045 		if (!state->fake_commit)
2046 			return NULL;
2047 
2048 		init_commit(state->fake_commit, NULL);
2049 	}
2050 
2051 	return state->fake_commit;
2052 }
2053 
2054 /**
2055  * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
2056  * @state: new modeset state to be committed
2057  * @nonblock: whether nonblocking behavior is requested.
2058  *
2059  * This function prepares @state to be used by the atomic helper's support for
2060  * nonblocking commits. Drivers using the nonblocking commit infrastructure
2061  * should always call this function from their
2062  * &drm_mode_config_funcs.atomic_commit hook.
2063  *
2064  * Drivers that need to extend the commit setup to private objects can use the
2065  * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
2066  *
2067  * To be able to use this support drivers need to use a few more helper
2068  * functions. drm_atomic_helper_wait_for_dependencies() must be called before
2069  * actually committing the hardware state, and for nonblocking commits this call
2070  * must be placed in the async worker. See also drm_atomic_helper_swap_state()
2071  * and its stall parameter, for when a driver's commit hooks look at the
2072  * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
2073  *
2074  * Completion of the hardware commit step must be signalled using
2075  * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
2076  * to read or change any permanent software or hardware modeset state. The only
2077  * exception is state protected by other means than &drm_modeset_lock locks.
2078  * Only the free standing @state with pointers to the old state structures can
2079  * be inspected, e.g. to clean up old buffers using
2080  * drm_atomic_helper_cleanup_planes().
2081  *
2082  * At the very end, before cleaning up @state drivers must call
2083  * drm_atomic_helper_commit_cleanup_done().
2084  *
2085  * This is all implemented by in drm_atomic_helper_commit(), giving drivers a
2086  * complete and easy-to-use default implementation of the atomic_commit() hook.
2087  *
2088  * The tracking of asynchronously executed and still pending commits is done
2089  * using the core structure &drm_crtc_commit.
2090  *
2091  * By default there's no need to clean up resources allocated by this function
2092  * explicitly: drm_atomic_state_default_clear() will take care of that
2093  * automatically.
2094  *
2095  * Returns:
2096  *
2097  * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
2098  * -ENOMEM on allocation failures and -EINTR when a signal is pending.
2099  */
2100 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
2101 				   bool nonblock)
2102 {
2103 	struct drm_crtc *crtc;
2104 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2105 	struct drm_connector *conn;
2106 	struct drm_connector_state *old_conn_state, *new_conn_state;
2107 	struct drm_plane *plane;
2108 	struct drm_plane_state *old_plane_state, *new_plane_state;
2109 	struct drm_crtc_commit *commit;
2110 	const struct drm_mode_config_helper_funcs *funcs;
2111 	int i, ret;
2112 
2113 	funcs = state->dev->mode_config.helper_private;
2114 
2115 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2116 		commit = kzalloc(sizeof(*commit), GFP_KERNEL);
2117 		if (!commit)
2118 			return -ENOMEM;
2119 
2120 		init_commit(commit, crtc);
2121 
2122 		new_crtc_state->commit = commit;
2123 
2124 		ret = stall_checks(crtc, nonblock);
2125 		if (ret)
2126 			return ret;
2127 
2128 		/*
2129 		 * Drivers only send out events when at least either current or
2130 		 * new CRTC state is active. Complete right away if everything
2131 		 * stays off.
2132 		 */
2133 		if (!old_crtc_state->active && !new_crtc_state->active) {
2134 			complete_all(&commit->flip_done);
2135 			continue;
2136 		}
2137 
2138 		/* Legacy cursor updates are fully unsynced. */
2139 		if (state->legacy_cursor_update) {
2140 			complete_all(&commit->flip_done);
2141 			continue;
2142 		}
2143 
2144 		if (!new_crtc_state->event) {
2145 			commit->event = kzalloc(sizeof(*commit->event),
2146 						GFP_KERNEL);
2147 			if (!commit->event)
2148 				return -ENOMEM;
2149 
2150 			new_crtc_state->event = commit->event;
2151 		}
2152 
2153 		new_crtc_state->event->base.completion = &commit->flip_done;
2154 		new_crtc_state->event->base.completion_release = release_crtc_commit;
2155 		drm_crtc_commit_get(commit);
2156 
2157 		commit->abort_completion = true;
2158 
2159 		state->crtcs[i].commit = commit;
2160 		drm_crtc_commit_get(commit);
2161 	}
2162 
2163 	for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
2164 		/*
2165 		 * Userspace is not allowed to get ahead of the previous
2166 		 * commit with nonblocking ones.
2167 		 */
2168 		if (nonblock && old_conn_state->commit &&
2169 		    !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
2170 			drm_dbg_atomic(conn->dev,
2171 				       "[CONNECTOR:%d:%s] busy with a previous commit\n",
2172 				       conn->base.id, conn->name);
2173 
2174 			return -EBUSY;
2175 		}
2176 
2177 		/* Always track connectors explicitly for e.g. link retraining. */
2178 		commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
2179 		if (!commit)
2180 			return -ENOMEM;
2181 
2182 		new_conn_state->commit = drm_crtc_commit_get(commit);
2183 	}
2184 
2185 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2186 		/*
2187 		 * Userspace is not allowed to get ahead of the previous
2188 		 * commit with nonblocking ones.
2189 		 */
2190 		if (nonblock && old_plane_state->commit &&
2191 		    !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
2192 			drm_dbg_atomic(plane->dev,
2193 				       "[PLANE:%d:%s] busy with a previous commit\n",
2194 				       plane->base.id, plane->name);
2195 
2196 			return -EBUSY;
2197 		}
2198 
2199 		/* Always track planes explicitly for async pageflip support. */
2200 		commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2201 		if (!commit)
2202 			return -ENOMEM;
2203 
2204 		new_plane_state->commit = drm_crtc_commit_get(commit);
2205 	}
2206 
2207 	if (funcs && funcs->atomic_commit_setup)
2208 		return funcs->atomic_commit_setup(state);
2209 
2210 	return 0;
2211 }
2212 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
2213 
2214 /**
2215  * drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits
2216  * @old_state: atomic state object with old state structures
2217  *
2218  * This function waits for all preceeding commits that touch the same CRTC as
2219  * @old_state to both be committed to the hardware (as signalled by
2220  * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
2221  * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2222  *
2223  * This is part of the atomic helper support for nonblocking commits, see
2224  * drm_atomic_helper_setup_commit() for an overview.
2225  */
2226 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
2227 {
2228 	struct drm_crtc *crtc;
2229 	struct drm_crtc_state *old_crtc_state;
2230 	struct drm_plane *plane;
2231 	struct drm_plane_state *old_plane_state;
2232 	struct drm_connector *conn;
2233 	struct drm_connector_state *old_conn_state;
2234 	int i;
2235 	long ret;
2236 
2237 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2238 		ret = drm_crtc_commit_wait(old_crtc_state->commit);
2239 		if (ret)
2240 			drm_err(crtc->dev,
2241 				"[CRTC:%d:%s] commit wait timed out\n",
2242 				crtc->base.id, crtc->name);
2243 	}
2244 
2245 	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
2246 		ret = drm_crtc_commit_wait(old_conn_state->commit);
2247 		if (ret)
2248 			drm_err(conn->dev,
2249 				"[CONNECTOR:%d:%s] commit wait timed out\n",
2250 				conn->base.id, conn->name);
2251 	}
2252 
2253 	for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
2254 		ret = drm_crtc_commit_wait(old_plane_state->commit);
2255 		if (ret)
2256 			drm_err(plane->dev,
2257 				"[PLANE:%d:%s] commit wait timed out\n",
2258 				plane->base.id, plane->name);
2259 	}
2260 }
2261 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2262 
2263 /**
2264  * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2265  * @old_state: atomic state object with old state structures
2266  *
2267  * This function walks all CRTCs and fakes VBLANK events on those with
2268  * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2269  * The primary use of this function is writeback connectors working in oneshot
2270  * mode and faking VBLANK events. In this case they only fake the VBLANK event
2271  * when a job is queued, and any change to the pipeline that does not touch the
2272  * connector is leading to timeouts when calling
2273  * drm_atomic_helper_wait_for_vblanks() or
2274  * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
2275  * connectors, this function can also fake VBLANK events for CRTCs without
2276  * VBLANK interrupt.
2277  *
2278  * This is part of the atomic helper support for nonblocking commits, see
2279  * drm_atomic_helper_setup_commit() for an overview.
2280  */
2281 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
2282 {
2283 	struct drm_crtc_state *new_crtc_state;
2284 	struct drm_crtc *crtc;
2285 	int i;
2286 
2287 	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
2288 		unsigned long flags;
2289 
2290 		if (!new_crtc_state->no_vblank)
2291 			continue;
2292 
2293 		spin_lock_irqsave(&old_state->dev->event_lock, flags);
2294 		if (new_crtc_state->event) {
2295 			drm_crtc_send_vblank_event(crtc,
2296 						   new_crtc_state->event);
2297 			new_crtc_state->event = NULL;
2298 		}
2299 		spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
2300 	}
2301 }
2302 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2303 
2304 /**
2305  * drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
2306  * @old_state: atomic state object with old state structures
2307  *
2308  * This function is used to signal completion of the hardware commit step. After
2309  * this step the driver is not allowed to read or change any permanent software
2310  * or hardware modeset state. The only exception is state protected by other
2311  * means than &drm_modeset_lock locks.
2312  *
2313  * Drivers should try to postpone any expensive or delayed cleanup work after
2314  * this function is called.
2315  *
2316  * This is part of the atomic helper support for nonblocking commits, see
2317  * drm_atomic_helper_setup_commit() for an overview.
2318  */
2319 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
2320 {
2321 	struct drm_crtc *crtc;
2322 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2323 	struct drm_crtc_commit *commit;
2324 	int i;
2325 
2326 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2327 		commit = new_crtc_state->commit;
2328 		if (!commit)
2329 			continue;
2330 
2331 		/*
2332 		 * copy new_crtc_state->commit to old_crtc_state->commit,
2333 		 * it's unsafe to touch new_crtc_state after hw_done,
2334 		 * but we still need to do so in cleanup_done().
2335 		 */
2336 		if (old_crtc_state->commit)
2337 			drm_crtc_commit_put(old_crtc_state->commit);
2338 
2339 		old_crtc_state->commit = drm_crtc_commit_get(commit);
2340 
2341 		/* backend must have consumed any event by now */
2342 		WARN_ON(new_crtc_state->event);
2343 		complete_all(&commit->hw_done);
2344 	}
2345 
2346 	if (old_state->fake_commit) {
2347 		complete_all(&old_state->fake_commit->hw_done);
2348 		complete_all(&old_state->fake_commit->flip_done);
2349 	}
2350 }
2351 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2352 
2353 /**
2354  * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2355  * @old_state: atomic state object with old state structures
2356  *
2357  * This signals completion of the atomic update @old_state, including any
2358  * cleanup work. If used, it must be called right before calling
2359  * drm_atomic_state_put().
2360  *
2361  * This is part of the atomic helper support for nonblocking commits, see
2362  * drm_atomic_helper_setup_commit() for an overview.
2363  */
2364 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
2365 {
2366 	struct drm_crtc *crtc;
2367 	struct drm_crtc_state *old_crtc_state;
2368 	struct drm_crtc_commit *commit;
2369 	int i;
2370 
2371 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
2372 		commit = old_crtc_state->commit;
2373 		if (WARN_ON(!commit))
2374 			continue;
2375 
2376 		complete_all(&commit->cleanup_done);
2377 		WARN_ON(!try_wait_for_completion(&commit->hw_done));
2378 
2379 		spin_lock(&crtc->commit_lock);
2380 		list_del(&commit->commit_entry);
2381 		spin_unlock(&crtc->commit_lock);
2382 	}
2383 
2384 	if (old_state->fake_commit) {
2385 		complete_all(&old_state->fake_commit->cleanup_done);
2386 		WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
2387 	}
2388 }
2389 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2390 
2391 /**
2392  * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2393  * @dev: DRM device
2394  * @state: atomic state object with new state structures
2395  *
2396  * This function prepares plane state, specifically framebuffers, for the new
2397  * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2398  * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2399  * any already successfully prepared framebuffer.
2400  *
2401  * Returns:
2402  * 0 on success, negative error code on failure.
2403  */
2404 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2405 				     struct drm_atomic_state *state)
2406 {
2407 	struct drm_connector *connector;
2408 	struct drm_connector_state *new_conn_state;
2409 	struct drm_plane *plane;
2410 	struct drm_plane_state *new_plane_state;
2411 	int ret, i, j;
2412 
2413 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2414 		if (!new_conn_state->writeback_job)
2415 			continue;
2416 
2417 		ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
2418 		if (ret < 0)
2419 			return ret;
2420 	}
2421 
2422 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2423 		const struct drm_plane_helper_funcs *funcs;
2424 
2425 		funcs = plane->helper_private;
2426 
2427 		if (funcs->prepare_fb) {
2428 			ret = funcs->prepare_fb(plane, new_plane_state);
2429 			if (ret)
2430 				goto fail;
2431 		} else {
2432 			WARN_ON_ONCE(funcs->cleanup_fb);
2433 
2434 			if (!drm_core_check_feature(dev, DRIVER_GEM))
2435 				continue;
2436 
2437 			ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
2438 			if (ret)
2439 				goto fail;
2440 		}
2441 	}
2442 
2443 	return 0;
2444 
2445 fail:
2446 	for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2447 		const struct drm_plane_helper_funcs *funcs;
2448 
2449 		if (j >= i)
2450 			continue;
2451 
2452 		funcs = plane->helper_private;
2453 
2454 		if (funcs->cleanup_fb)
2455 			funcs->cleanup_fb(plane, new_plane_state);
2456 	}
2457 
2458 	return ret;
2459 }
2460 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2461 
2462 static bool plane_crtc_active(const struct drm_plane_state *state)
2463 {
2464 	return state->crtc && state->crtc->state->active;
2465 }
2466 
2467 /**
2468  * drm_atomic_helper_commit_planes - commit plane state
2469  * @dev: DRM device
2470  * @old_state: atomic state object with old state structures
2471  * @flags: flags for committing plane state
2472  *
2473  * This function commits the new plane state using the plane and atomic helper
2474  * functions for planes and CRTCs. It assumes that the atomic state has already
2475  * been pushed into the relevant object state pointers, since this step can no
2476  * longer fail.
2477  *
2478  * It still requires the global state object @old_state to know which planes and
2479  * crtcs need to be updated though.
2480  *
2481  * Note that this function does all plane updates across all CRTCs in one step.
2482  * If the hardware can't support this approach look at
2483  * drm_atomic_helper_commit_planes_on_crtc() instead.
2484  *
2485  * Plane parameters can be updated by applications while the associated CRTC is
2486  * disabled. The DRM/KMS core will store the parameters in the plane state,
2487  * which will be available to the driver when the CRTC is turned on. As a result
2488  * most drivers don't need to be immediately notified of plane updates for a
2489  * disabled CRTC.
2490  *
2491  * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2492  * @flags in order not to receive plane update notifications related to a
2493  * disabled CRTC. This avoids the need to manually ignore plane updates in
2494  * driver code when the driver and/or hardware can't or just don't need to deal
2495  * with updates on disabled CRTCs, for example when supporting runtime PM.
2496  *
2497  * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2498  * display controllers require to disable a CRTC's planes when the CRTC is
2499  * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
2500  * call for a plane if the CRTC of the old plane state needs a modesetting
2501  * operation. Of course, the drivers need to disable the planes in their CRTC
2502  * disable callbacks since no one else would do that.
2503  *
2504  * The drm_atomic_helper_commit() default implementation doesn't set the
2505  * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2506  * This should not be copied blindly by drivers.
2507  */
2508 void drm_atomic_helper_commit_planes(struct drm_device *dev,
2509 				     struct drm_atomic_state *old_state,
2510 				     uint32_t flags)
2511 {
2512 	struct drm_crtc *crtc;
2513 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2514 	struct drm_plane *plane;
2515 	struct drm_plane_state *old_plane_state, *new_plane_state;
2516 	int i;
2517 	bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2518 	bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2519 
2520 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2521 		const struct drm_crtc_helper_funcs *funcs;
2522 
2523 		funcs = crtc->helper_private;
2524 
2525 		if (!funcs || !funcs->atomic_begin)
2526 			continue;
2527 
2528 		if (active_only && !new_crtc_state->active)
2529 			continue;
2530 
2531 		funcs->atomic_begin(crtc, old_state);
2532 	}
2533 
2534 	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2535 		const struct drm_plane_helper_funcs *funcs;
2536 		bool disabling;
2537 
2538 		funcs = plane->helper_private;
2539 
2540 		if (!funcs)
2541 			continue;
2542 
2543 		disabling = drm_atomic_plane_disabling(old_plane_state,
2544 						       new_plane_state);
2545 
2546 		if (active_only) {
2547 			/*
2548 			 * Skip planes related to inactive CRTCs. If the plane
2549 			 * is enabled use the state of the current CRTC. If the
2550 			 * plane is being disabled use the state of the old
2551 			 * CRTC to avoid skipping planes being disabled on an
2552 			 * active CRTC.
2553 			 */
2554 			if (!disabling && !plane_crtc_active(new_plane_state))
2555 				continue;
2556 			if (disabling && !plane_crtc_active(old_plane_state))
2557 				continue;
2558 		}
2559 
2560 		/*
2561 		 * Special-case disabling the plane if drivers support it.
2562 		 */
2563 		if (disabling && funcs->atomic_disable) {
2564 			struct drm_crtc_state *crtc_state;
2565 
2566 			crtc_state = old_plane_state->crtc->state;
2567 
2568 			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2569 			    no_disable)
2570 				continue;
2571 
2572 			funcs->atomic_disable(plane, old_state);
2573 		} else if (new_plane_state->crtc || disabling) {
2574 			funcs->atomic_update(plane, old_state);
2575 		}
2576 	}
2577 
2578 	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
2579 		const struct drm_crtc_helper_funcs *funcs;
2580 
2581 		funcs = crtc->helper_private;
2582 
2583 		if (!funcs || !funcs->atomic_flush)
2584 			continue;
2585 
2586 		if (active_only && !new_crtc_state->active)
2587 			continue;
2588 
2589 		funcs->atomic_flush(crtc, old_state);
2590 	}
2591 }
2592 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
2593 
2594 /**
2595  * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
2596  * @old_crtc_state: atomic state object with the old CRTC state
2597  *
2598  * This function commits the new plane state using the plane and atomic helper
2599  * functions for planes on the specific CRTC. It assumes that the atomic state
2600  * has already been pushed into the relevant object state pointers, since this
2601  * step can no longer fail.
2602  *
2603  * This function is useful when plane updates should be done CRTC-by-CRTC
2604  * instead of one global step like drm_atomic_helper_commit_planes() does.
2605  *
2606  * This function can only be savely used when planes are not allowed to move
2607  * between different CRTCs because this function doesn't handle inter-CRTC
2608  * dependencies. Callers need to ensure that either no such dependencies exist,
2609  * resolve them through ordering of commit calls or through some other means.
2610  */
2611 void
2612 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
2613 {
2614 	const struct drm_crtc_helper_funcs *crtc_funcs;
2615 	struct drm_crtc *crtc = old_crtc_state->crtc;
2616 	struct drm_atomic_state *old_state = old_crtc_state->state;
2617 	struct drm_crtc_state *new_crtc_state =
2618 		drm_atomic_get_new_crtc_state(old_state, crtc);
2619 	struct drm_plane *plane;
2620 	unsigned int plane_mask;
2621 
2622 	plane_mask = old_crtc_state->plane_mask;
2623 	plane_mask |= new_crtc_state->plane_mask;
2624 
2625 	crtc_funcs = crtc->helper_private;
2626 	if (crtc_funcs && crtc_funcs->atomic_begin)
2627 		crtc_funcs->atomic_begin(crtc, old_state);
2628 
2629 	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
2630 		struct drm_plane_state *old_plane_state =
2631 			drm_atomic_get_old_plane_state(old_state, plane);
2632 		struct drm_plane_state *new_plane_state =
2633 			drm_atomic_get_new_plane_state(old_state, plane);
2634 		const struct drm_plane_helper_funcs *plane_funcs;
2635 
2636 		plane_funcs = plane->helper_private;
2637 
2638 		if (!old_plane_state || !plane_funcs)
2639 			continue;
2640 
2641 		WARN_ON(new_plane_state->crtc &&
2642 			new_plane_state->crtc != crtc);
2643 
2644 		if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
2645 		    plane_funcs->atomic_disable)
2646 			plane_funcs->atomic_disable(plane, old_state);
2647 		else if (new_plane_state->crtc ||
2648 			 drm_atomic_plane_disabling(old_plane_state, new_plane_state))
2649 			plane_funcs->atomic_update(plane, old_state);
2650 	}
2651 
2652 	if (crtc_funcs && crtc_funcs->atomic_flush)
2653 		crtc_funcs->atomic_flush(crtc, old_state);
2654 }
2655 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
2656 
2657 /**
2658  * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
2659  * @old_crtc_state: atomic state object with the old CRTC state
2660  * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
2661  *
2662  * Disables all planes associated with the given CRTC. This can be
2663  * used for instance in the CRTC helper atomic_disable callback to disable
2664  * all planes.
2665  *
2666  * If the atomic-parameter is set the function calls the CRTC's
2667  * atomic_begin hook before and atomic_flush hook after disabling the
2668  * planes.
2669  *
2670  * It is a bug to call this function without having implemented the
2671  * &drm_plane_helper_funcs.atomic_disable plane hook.
2672  */
2673 void
2674 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
2675 					 bool atomic)
2676 {
2677 	struct drm_crtc *crtc = old_crtc_state->crtc;
2678 	const struct drm_crtc_helper_funcs *crtc_funcs =
2679 		crtc->helper_private;
2680 	struct drm_plane *plane;
2681 
2682 	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
2683 		crtc_funcs->atomic_begin(crtc, NULL);
2684 
2685 	drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
2686 		const struct drm_plane_helper_funcs *plane_funcs =
2687 			plane->helper_private;
2688 
2689 		if (!plane_funcs)
2690 			continue;
2691 
2692 		WARN_ON(!plane_funcs->atomic_disable);
2693 		if (plane_funcs->atomic_disable)
2694 			plane_funcs->atomic_disable(plane, NULL);
2695 	}
2696 
2697 	if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
2698 		crtc_funcs->atomic_flush(crtc, NULL);
2699 }
2700 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
2701 
2702 /**
2703  * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
2704  * @dev: DRM device
2705  * @old_state: atomic state object with old state structures
2706  *
2707  * This function cleans up plane state, specifically framebuffers, from the old
2708  * configuration. Hence the old configuration must be perserved in @old_state to
2709  * be able to call this function.
2710  *
2711  * This function must also be called on the new state when the atomic update
2712  * fails at any point after calling drm_atomic_helper_prepare_planes().
2713  */
2714 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
2715 				      struct drm_atomic_state *old_state)
2716 {
2717 	struct drm_plane *plane;
2718 	struct drm_plane_state *old_plane_state, *new_plane_state;
2719 	int i;
2720 
2721 	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
2722 		const struct drm_plane_helper_funcs *funcs;
2723 		struct drm_plane_state *plane_state;
2724 
2725 		/*
2726 		 * This might be called before swapping when commit is aborted,
2727 		 * in which case we have to cleanup the new state.
2728 		 */
2729 		if (old_plane_state == plane->state)
2730 			plane_state = new_plane_state;
2731 		else
2732 			plane_state = old_plane_state;
2733 
2734 		funcs = plane->helper_private;
2735 
2736 		if (funcs->cleanup_fb)
2737 			funcs->cleanup_fb(plane, plane_state);
2738 	}
2739 }
2740 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
2741 
2742 /**
2743  * drm_atomic_helper_swap_state - store atomic state into current sw state
2744  * @state: atomic state
2745  * @stall: stall for preceding commits
2746  *
2747  * This function stores the atomic state into the current state pointers in all
2748  * driver objects. It should be called after all failing steps have been done
2749  * and succeeded, but before the actual hardware state is committed.
2750  *
2751  * For cleanup and error recovery the current state for all changed objects will
2752  * be swapped into @state.
2753  *
2754  * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
2755  *
2756  * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
2757  *
2758  * 2. Do any other steps that might fail.
2759  *
2760  * 3. Put the staged state into the current state pointers with this function.
2761  *
2762  * 4. Actually commit the hardware state.
2763  *
2764  * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
2765  * contains the old state. Also do any other cleanup required with that state.
2766  *
2767  * @stall must be set when nonblocking commits for this driver directly access
2768  * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
2769  * the current atomic helpers this is almost always the case, since the helpers
2770  * don't pass the right state structures to the callbacks.
2771  *
2772  * Returns:
2773  *
2774  * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
2775  * waiting for the previous commits has been interrupted.
2776  */
2777 int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
2778 				  bool stall)
2779 {
2780 	int i, ret;
2781 	struct drm_connector *connector;
2782 	struct drm_connector_state *old_conn_state, *new_conn_state;
2783 	struct drm_crtc *crtc;
2784 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2785 	struct drm_plane *plane;
2786 	struct drm_plane_state *old_plane_state, *new_plane_state;
2787 	struct drm_crtc_commit *commit;
2788 	struct drm_private_obj *obj;
2789 	struct drm_private_state *old_obj_state, *new_obj_state;
2790 
2791 	if (stall) {
2792 		/*
2793 		 * We have to stall for hw_done here before
2794 		 * drm_atomic_helper_wait_for_dependencies() because flip
2795 		 * depth > 1 is not yet supported by all drivers. As long as
2796 		 * obj->state is directly dereferenced anywhere in the drivers
2797 		 * atomic_commit_tail function, then it's unsafe to swap state
2798 		 * before drm_atomic_helper_commit_hw_done() is called.
2799 		 */
2800 
2801 		for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2802 			commit = old_crtc_state->commit;
2803 
2804 			if (!commit)
2805 				continue;
2806 
2807 			ret = wait_for_completion_interruptible(&commit->hw_done);
2808 			if (ret)
2809 				return ret;
2810 		}
2811 
2812 		for_each_old_connector_in_state(state, connector, old_conn_state, i) {
2813 			commit = old_conn_state->commit;
2814 
2815 			if (!commit)
2816 				continue;
2817 
2818 			ret = wait_for_completion_interruptible(&commit->hw_done);
2819 			if (ret)
2820 				return ret;
2821 		}
2822 
2823 		for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2824 			commit = old_plane_state->commit;
2825 
2826 			if (!commit)
2827 				continue;
2828 
2829 			ret = wait_for_completion_interruptible(&commit->hw_done);
2830 			if (ret)
2831 				return ret;
2832 		}
2833 	}
2834 
2835 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
2836 		WARN_ON(connector->state != old_conn_state);
2837 
2838 		old_conn_state->state = state;
2839 		new_conn_state->state = NULL;
2840 
2841 		state->connectors[i].state = old_conn_state;
2842 		connector->state = new_conn_state;
2843 	}
2844 
2845 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2846 		WARN_ON(crtc->state != old_crtc_state);
2847 
2848 		old_crtc_state->state = state;
2849 		new_crtc_state->state = NULL;
2850 
2851 		state->crtcs[i].state = old_crtc_state;
2852 		crtc->state = new_crtc_state;
2853 
2854 		if (new_crtc_state->commit) {
2855 			spin_lock(&crtc->commit_lock);
2856 			list_add(&new_crtc_state->commit->commit_entry,
2857 				 &crtc->commit_list);
2858 			spin_unlock(&crtc->commit_lock);
2859 
2860 			new_crtc_state->commit->event = NULL;
2861 		}
2862 	}
2863 
2864 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2865 		WARN_ON(plane->state != old_plane_state);
2866 
2867 		old_plane_state->state = state;
2868 		new_plane_state->state = NULL;
2869 
2870 		state->planes[i].state = old_plane_state;
2871 		plane->state = new_plane_state;
2872 	}
2873 
2874 	for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
2875 		WARN_ON(obj->state != old_obj_state);
2876 
2877 		old_obj_state->state = state;
2878 		new_obj_state->state = NULL;
2879 
2880 		state->private_objs[i].state = old_obj_state;
2881 		obj->state = new_obj_state;
2882 	}
2883 
2884 	return 0;
2885 }
2886 EXPORT_SYMBOL(drm_atomic_helper_swap_state);
2887 
2888 /**
2889  * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
2890  * @plane: plane object to update
2891  * @crtc: owning CRTC of owning plane
2892  * @fb: framebuffer to flip onto plane
2893  * @crtc_x: x offset of primary plane on @crtc
2894  * @crtc_y: y offset of primary plane on @crtc
2895  * @crtc_w: width of primary plane rectangle on @crtc
2896  * @crtc_h: height of primary plane rectangle on @crtc
2897  * @src_x: x offset of @fb for panning
2898  * @src_y: y offset of @fb for panning
2899  * @src_w: width of source rectangle in @fb
2900  * @src_h: height of source rectangle in @fb
2901  * @ctx: lock acquire context
2902  *
2903  * Provides a default plane update handler using the atomic driver interface.
2904  *
2905  * RETURNS:
2906  * Zero on success, error code on failure
2907  */
2908 int drm_atomic_helper_update_plane(struct drm_plane *plane,
2909 				   struct drm_crtc *crtc,
2910 				   struct drm_framebuffer *fb,
2911 				   int crtc_x, int crtc_y,
2912 				   unsigned int crtc_w, unsigned int crtc_h,
2913 				   uint32_t src_x, uint32_t src_y,
2914 				   uint32_t src_w, uint32_t src_h,
2915 				   struct drm_modeset_acquire_ctx *ctx)
2916 {
2917 	struct drm_atomic_state *state;
2918 	struct drm_plane_state *plane_state;
2919 	int ret = 0;
2920 
2921 	state = drm_atomic_state_alloc(plane->dev);
2922 	if (!state)
2923 		return -ENOMEM;
2924 
2925 	state->acquire_ctx = ctx;
2926 	plane_state = drm_atomic_get_plane_state(state, plane);
2927 	if (IS_ERR(plane_state)) {
2928 		ret = PTR_ERR(plane_state);
2929 		goto fail;
2930 	}
2931 
2932 	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
2933 	if (ret != 0)
2934 		goto fail;
2935 	drm_atomic_set_fb_for_plane(plane_state, fb);
2936 	plane_state->crtc_x = crtc_x;
2937 	plane_state->crtc_y = crtc_y;
2938 	plane_state->crtc_w = crtc_w;
2939 	plane_state->crtc_h = crtc_h;
2940 	plane_state->src_x = src_x;
2941 	plane_state->src_y = src_y;
2942 	plane_state->src_w = src_w;
2943 	plane_state->src_h = src_h;
2944 
2945 	if (plane == crtc->cursor)
2946 		state->legacy_cursor_update = true;
2947 
2948 	ret = drm_atomic_commit(state);
2949 fail:
2950 	drm_atomic_state_put(state);
2951 	return ret;
2952 }
2953 EXPORT_SYMBOL(drm_atomic_helper_update_plane);
2954 
2955 /**
2956  * drm_atomic_helper_disable_plane - Helper for primary plane disable using * atomic
2957  * @plane: plane to disable
2958  * @ctx: lock acquire context
2959  *
2960  * Provides a default plane disable handler using the atomic driver interface.
2961  *
2962  * RETURNS:
2963  * Zero on success, error code on failure
2964  */
2965 int drm_atomic_helper_disable_plane(struct drm_plane *plane,
2966 				    struct drm_modeset_acquire_ctx *ctx)
2967 {
2968 	struct drm_atomic_state *state;
2969 	struct drm_plane_state *plane_state;
2970 	int ret = 0;
2971 
2972 	state = drm_atomic_state_alloc(plane->dev);
2973 	if (!state)
2974 		return -ENOMEM;
2975 
2976 	state->acquire_ctx = ctx;
2977 	plane_state = drm_atomic_get_plane_state(state, plane);
2978 	if (IS_ERR(plane_state)) {
2979 		ret = PTR_ERR(plane_state);
2980 		goto fail;
2981 	}
2982 
2983 	if (plane_state->crtc && plane_state->crtc->cursor == plane)
2984 		plane_state->state->legacy_cursor_update = true;
2985 
2986 	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
2987 	if (ret != 0)
2988 		goto fail;
2989 
2990 	ret = drm_atomic_commit(state);
2991 fail:
2992 	drm_atomic_state_put(state);
2993 	return ret;
2994 }
2995 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
2996 
2997 /**
2998  * drm_atomic_helper_set_config - set a new config from userspace
2999  * @set: mode set configuration
3000  * @ctx: lock acquisition context
3001  *
3002  * Provides a default CRTC set_config handler using the atomic driver interface.
3003  *
3004  * NOTE: For backwards compatibility with old userspace this automatically
3005  * resets the "link-status" property to GOOD, to force any link
3006  * re-training. The SETCRTC ioctl does not define whether an update does
3007  * need a full modeset or just a plane update, hence we're allowed to do
3008  * that. See also drm_connector_set_link_status_property().
3009  *
3010  * Returns:
3011  * Returns 0 on success, negative errno numbers on failure.
3012  */
3013 int drm_atomic_helper_set_config(struct drm_mode_set *set,
3014 				 struct drm_modeset_acquire_ctx *ctx)
3015 {
3016 	struct drm_atomic_state *state;
3017 	struct drm_crtc *crtc = set->crtc;
3018 	int ret = 0;
3019 
3020 	state = drm_atomic_state_alloc(crtc->dev);
3021 	if (!state)
3022 		return -ENOMEM;
3023 
3024 	state->acquire_ctx = ctx;
3025 	ret = __drm_atomic_helper_set_config(set, state);
3026 	if (ret != 0)
3027 		goto fail;
3028 
3029 	ret = handle_conflicting_encoders(state, true);
3030 	if (ret)
3031 		goto fail;
3032 
3033 	ret = drm_atomic_commit(state);
3034 
3035 fail:
3036 	drm_atomic_state_put(state);
3037 	return ret;
3038 }
3039 EXPORT_SYMBOL(drm_atomic_helper_set_config);
3040 
3041 /**
3042  * drm_atomic_helper_disable_all - disable all currently active outputs
3043  * @dev: DRM device
3044  * @ctx: lock acquisition context
3045  *
3046  * Loops through all connectors, finding those that aren't turned off and then
3047  * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3048  * that they are connected to.
3049  *
3050  * This is used for example in suspend/resume to disable all currently active
3051  * functions when suspending. If you just want to shut down everything at e.g.
3052  * driver unload, look at drm_atomic_helper_shutdown().
3053  *
3054  * Note that if callers haven't already acquired all modeset locks this might
3055  * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3056  *
3057  * Returns:
3058  * 0 on success or a negative error code on failure.
3059  *
3060  * See also:
3061  * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3062  * drm_atomic_helper_shutdown().
3063  */
3064 int drm_atomic_helper_disable_all(struct drm_device *dev,
3065 				  struct drm_modeset_acquire_ctx *ctx)
3066 {
3067 	struct drm_atomic_state *state;
3068 	struct drm_connector_state *conn_state;
3069 	struct drm_connector *conn;
3070 	struct drm_plane_state *plane_state;
3071 	struct drm_plane *plane;
3072 	struct drm_crtc_state *crtc_state;
3073 	struct drm_crtc *crtc;
3074 	int ret, i;
3075 
3076 	state = drm_atomic_state_alloc(dev);
3077 	if (!state)
3078 		return -ENOMEM;
3079 
3080 	state->acquire_ctx = ctx;
3081 
3082 	drm_for_each_crtc(crtc, dev) {
3083 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
3084 		if (IS_ERR(crtc_state)) {
3085 			ret = PTR_ERR(crtc_state);
3086 			goto free;
3087 		}
3088 
3089 		crtc_state->active = false;
3090 
3091 		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
3092 		if (ret < 0)
3093 			goto free;
3094 
3095 		ret = drm_atomic_add_affected_planes(state, crtc);
3096 		if (ret < 0)
3097 			goto free;
3098 
3099 		ret = drm_atomic_add_affected_connectors(state, crtc);
3100 		if (ret < 0)
3101 			goto free;
3102 	}
3103 
3104 	for_each_new_connector_in_state(state, conn, conn_state, i) {
3105 		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
3106 		if (ret < 0)
3107 			goto free;
3108 	}
3109 
3110 	for_each_new_plane_in_state(state, plane, plane_state, i) {
3111 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3112 		if (ret < 0)
3113 			goto free;
3114 
3115 		drm_atomic_set_fb_for_plane(plane_state, NULL);
3116 	}
3117 
3118 	ret = drm_atomic_commit(state);
3119 free:
3120 	drm_atomic_state_put(state);
3121 	return ret;
3122 }
3123 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3124 
3125 /**
3126  * drm_atomic_helper_shutdown - shutdown all CRTC
3127  * @dev: DRM device
3128  *
3129  * This shuts down all CRTC, which is useful for driver unloading. Shutdown on
3130  * suspend should instead be handled with drm_atomic_helper_suspend(), since
3131  * that also takes a snapshot of the modeset state to be restored on resume.
3132  *
3133  * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3134  * and it is the atomic version of drm_crtc_force_disable_all().
3135  */
3136 void drm_atomic_helper_shutdown(struct drm_device *dev)
3137 {
3138 	struct drm_modeset_acquire_ctx ctx;
3139 	int ret;
3140 
3141 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
3142 
3143 	ret = drm_atomic_helper_disable_all(dev, &ctx);
3144 	if (ret)
3145 		drm_err(dev,
3146 			"Disabling all crtc's during unload failed with %i\n",
3147 			ret);
3148 
3149 	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
3150 }
3151 EXPORT_SYMBOL(drm_atomic_helper_shutdown);
3152 
3153 /**
3154  * drm_atomic_helper_duplicate_state - duplicate an atomic state object
3155  * @dev: DRM device
3156  * @ctx: lock acquisition context
3157  *
3158  * Makes a copy of the current atomic state by looping over all objects and
3159  * duplicating their respective states. This is used for example by suspend/
3160  * resume support code to save the state prior to suspend such that it can
3161  * be restored upon resume.
3162  *
3163  * Note that this treats atomic state as persistent between save and restore.
3164  * Drivers must make sure that this is possible and won't result in confusion
3165  * or erroneous behaviour.
3166  *
3167  * Note that if callers haven't already acquired all modeset locks this might
3168  * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3169  *
3170  * Returns:
3171  * A pointer to the copy of the atomic state object on success or an
3172  * ERR_PTR()-encoded error code on failure.
3173  *
3174  * See also:
3175  * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
3176  */
3177 struct drm_atomic_state *
3178 drm_atomic_helper_duplicate_state(struct drm_device *dev,
3179 				  struct drm_modeset_acquire_ctx *ctx)
3180 {
3181 	struct drm_atomic_state *state;
3182 	struct drm_connector *conn;
3183 	struct drm_connector_list_iter conn_iter;
3184 	struct drm_plane *plane;
3185 	struct drm_crtc *crtc;
3186 	int err = 0;
3187 
3188 	state = drm_atomic_state_alloc(dev);
3189 	if (!state)
3190 		return ERR_PTR(-ENOMEM);
3191 
3192 	state->acquire_ctx = ctx;
3193 	state->duplicated = true;
3194 
3195 	drm_for_each_crtc(crtc, dev) {
3196 		struct drm_crtc_state *crtc_state;
3197 
3198 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
3199 		if (IS_ERR(crtc_state)) {
3200 			err = PTR_ERR(crtc_state);
3201 			goto free;
3202 		}
3203 	}
3204 
3205 	drm_for_each_plane(plane, dev) {
3206 		struct drm_plane_state *plane_state;
3207 
3208 		plane_state = drm_atomic_get_plane_state(state, plane);
3209 		if (IS_ERR(plane_state)) {
3210 			err = PTR_ERR(plane_state);
3211 			goto free;
3212 		}
3213 	}
3214 
3215 	drm_connector_list_iter_begin(dev, &conn_iter);
3216 	drm_for_each_connector_iter(conn, &conn_iter) {
3217 		struct drm_connector_state *conn_state;
3218 
3219 		conn_state = drm_atomic_get_connector_state(state, conn);
3220 		if (IS_ERR(conn_state)) {
3221 			err = PTR_ERR(conn_state);
3222 			drm_connector_list_iter_end(&conn_iter);
3223 			goto free;
3224 		}
3225 	}
3226 	drm_connector_list_iter_end(&conn_iter);
3227 
3228 	/* clear the acquire context so that it isn't accidentally reused */
3229 	state->acquire_ctx = NULL;
3230 
3231 free:
3232 	if (err < 0) {
3233 		drm_atomic_state_put(state);
3234 		state = ERR_PTR(err);
3235 	}
3236 
3237 	return state;
3238 }
3239 EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
3240 
3241 /**
3242  * drm_atomic_helper_suspend - subsystem-level suspend helper
3243  * @dev: DRM device
3244  *
3245  * Duplicates the current atomic state, disables all active outputs and then
3246  * returns a pointer to the original atomic state to the caller. Drivers can
3247  * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
3248  * restore the output configuration that was active at the time the system
3249  * entered suspend.
3250  *
3251  * Note that it is potentially unsafe to use this. The atomic state object
3252  * returned by this function is assumed to be persistent. Drivers must ensure
3253  * that this holds true. Before calling this function, drivers must make sure
3254  * to suspend fbdev emulation so that nothing can be using the device.
3255  *
3256  * Returns:
3257  * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
3258  * encoded error code on failure. Drivers should store the returned atomic
3259  * state object and pass it to the drm_atomic_helper_resume() helper upon
3260  * resume.
3261  *
3262  * See also:
3263  * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
3264  * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
3265  */
3266 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
3267 {
3268 	struct drm_modeset_acquire_ctx ctx;
3269 	struct drm_atomic_state *state;
3270 	int err;
3271 
3272 	/* This can never be returned, but it makes the compiler happy */
3273 	state = ERR_PTR(-EINVAL);
3274 
3275 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3276 
3277 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
3278 	if (IS_ERR(state))
3279 		goto unlock;
3280 
3281 	err = drm_atomic_helper_disable_all(dev, &ctx);
3282 	if (err < 0) {
3283 		drm_atomic_state_put(state);
3284 		state = ERR_PTR(err);
3285 		goto unlock;
3286 	}
3287 
3288 unlock:
3289 	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3290 	if (err)
3291 		return ERR_PTR(err);
3292 
3293 	return state;
3294 }
3295 EXPORT_SYMBOL(drm_atomic_helper_suspend);
3296 
3297 /**
3298  * drm_atomic_helper_commit_duplicated_state - commit duplicated state
3299  * @state: duplicated atomic state to commit
3300  * @ctx: pointer to acquire_ctx to use for commit.
3301  *
3302  * The state returned by drm_atomic_helper_duplicate_state() and
3303  * drm_atomic_helper_suspend() is partially invalid, and needs to
3304  * be fixed up before commit.
3305  *
3306  * Returns:
3307  * 0 on success or a negative error code on failure.
3308  *
3309  * See also:
3310  * drm_atomic_helper_suspend()
3311  */
3312 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3313 					      struct drm_modeset_acquire_ctx *ctx)
3314 {
3315 	int i, ret;
3316 	struct drm_plane *plane;
3317 	struct drm_plane_state *new_plane_state;
3318 	struct drm_connector *connector;
3319 	struct drm_connector_state *new_conn_state;
3320 	struct drm_crtc *crtc;
3321 	struct drm_crtc_state *new_crtc_state;
3322 
3323 	state->acquire_ctx = ctx;
3324 
3325 	for_each_new_plane_in_state(state, plane, new_plane_state, i)
3326 		state->planes[i].old_state = plane->state;
3327 
3328 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
3329 		state->crtcs[i].old_state = crtc->state;
3330 
3331 	for_each_new_connector_in_state(state, connector, new_conn_state, i)
3332 		state->connectors[i].old_state = connector->state;
3333 
3334 	ret = drm_atomic_commit(state);
3335 
3336 	state->acquire_ctx = NULL;
3337 
3338 	return ret;
3339 }
3340 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
3341 
3342 /**
3343  * drm_atomic_helper_resume - subsystem-level resume helper
3344  * @dev: DRM device
3345  * @state: atomic state to resume to
3346  *
3347  * Calls drm_mode_config_reset() to synchronize hardware and software states,
3348  * grabs all modeset locks and commits the atomic state object. This can be
3349  * used in conjunction with the drm_atomic_helper_suspend() helper to
3350  * implement suspend/resume for drivers that support atomic mode-setting.
3351  *
3352  * Returns:
3353  * 0 on success or a negative error code on failure.
3354  *
3355  * See also:
3356  * drm_atomic_helper_suspend()
3357  */
3358 int drm_atomic_helper_resume(struct drm_device *dev,
3359 			     struct drm_atomic_state *state)
3360 {
3361 	struct drm_modeset_acquire_ctx ctx;
3362 	int err;
3363 
3364 	drm_mode_config_reset(dev);
3365 
3366 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3367 
3368 	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
3369 
3370 	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3371 	drm_atomic_state_put(state);
3372 
3373 	return err;
3374 }
3375 EXPORT_SYMBOL(drm_atomic_helper_resume);
3376 
3377 static int page_flip_common(struct drm_atomic_state *state,
3378 			    struct drm_crtc *crtc,
3379 			    struct drm_framebuffer *fb,
3380 			    struct drm_pending_vblank_event *event,
3381 			    uint32_t flags)
3382 {
3383 	struct drm_plane *plane = crtc->primary;
3384 	struct drm_plane_state *plane_state;
3385 	struct drm_crtc_state *crtc_state;
3386 	int ret = 0;
3387 
3388 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
3389 	if (IS_ERR(crtc_state))
3390 		return PTR_ERR(crtc_state);
3391 
3392 	crtc_state->event = event;
3393 	crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;
3394 
3395 	plane_state = drm_atomic_get_plane_state(state, plane);
3396 	if (IS_ERR(plane_state))
3397 		return PTR_ERR(plane_state);
3398 
3399 	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3400 	if (ret != 0)
3401 		return ret;
3402 	drm_atomic_set_fb_for_plane(plane_state, fb);
3403 
3404 	/* Make sure we don't accidentally do a full modeset. */
3405 	state->allow_modeset = false;
3406 	if (!crtc_state->active) {
3407 		drm_dbg_atomic(crtc->dev,
3408 			       "[CRTC:%d:%s] disabled, rejecting legacy flip\n",
3409 			       crtc->base.id, crtc->name);
3410 		return -EINVAL;
3411 	}
3412 
3413 	return ret;
3414 }
3415 
3416 /**
3417  * drm_atomic_helper_page_flip - execute a legacy page flip
3418  * @crtc: DRM CRTC
3419  * @fb: DRM framebuffer
3420  * @event: optional DRM event to signal upon completion
3421  * @flags: flip flags for non-vblank sync'ed updates
3422  * @ctx: lock acquisition context
3423  *
3424  * Provides a default &drm_crtc_funcs.page_flip implementation
3425  * using the atomic driver interface.
3426  *
3427  * Returns:
3428  * Returns 0 on success, negative errno numbers on failure.
3429  *
3430  * See also:
3431  * drm_atomic_helper_page_flip_target()
3432  */
3433 int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
3434 				struct drm_framebuffer *fb,
3435 				struct drm_pending_vblank_event *event,
3436 				uint32_t flags,
3437 				struct drm_modeset_acquire_ctx *ctx)
3438 {
3439 	struct drm_plane *plane = crtc->primary;
3440 	struct drm_atomic_state *state;
3441 	int ret = 0;
3442 
3443 	state = drm_atomic_state_alloc(plane->dev);
3444 	if (!state)
3445 		return -ENOMEM;
3446 
3447 	state->acquire_ctx = ctx;
3448 
3449 	ret = page_flip_common(state, crtc, fb, event, flags);
3450 	if (ret != 0)
3451 		goto fail;
3452 
3453 	ret = drm_atomic_nonblocking_commit(state);
3454 fail:
3455 	drm_atomic_state_put(state);
3456 	return ret;
3457 }
3458 EXPORT_SYMBOL(drm_atomic_helper_page_flip);
3459 
3460 /**
3461  * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
3462  * @crtc: DRM CRTC
3463  * @fb: DRM framebuffer
3464  * @event: optional DRM event to signal upon completion
3465  * @flags: flip flags for non-vblank sync'ed updates
3466  * @target: specifying the target vblank period when the flip to take effect
3467  * @ctx: lock acquisition context
3468  *
3469  * Provides a default &drm_crtc_funcs.page_flip_target implementation.
3470  * Similar to drm_atomic_helper_page_flip() with extra parameter to specify
3471  * target vblank period to flip.
3472  *
3473  * Returns:
3474  * Returns 0 on success, negative errno numbers on failure.
3475  */
3476 int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
3477 				       struct drm_framebuffer *fb,
3478 				       struct drm_pending_vblank_event *event,
3479 				       uint32_t flags,
3480 				       uint32_t target,
3481 				       struct drm_modeset_acquire_ctx *ctx)
3482 {
3483 	struct drm_plane *plane = crtc->primary;
3484 	struct drm_atomic_state *state;
3485 	struct drm_crtc_state *crtc_state;
3486 	int ret = 0;
3487 
3488 	state = drm_atomic_state_alloc(plane->dev);
3489 	if (!state)
3490 		return -ENOMEM;
3491 
3492 	state->acquire_ctx = ctx;
3493 
3494 	ret = page_flip_common(state, crtc, fb, event, flags);
3495 	if (ret != 0)
3496 		goto fail;
3497 
3498 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3499 	if (WARN_ON(!crtc_state)) {
3500 		ret = -EINVAL;
3501 		goto fail;
3502 	}
3503 	crtc_state->target_vblank = target;
3504 
3505 	ret = drm_atomic_nonblocking_commit(state);
3506 fail:
3507 	drm_atomic_state_put(state);
3508 	return ret;
3509 }
3510 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
3511 
3512 /**
3513  * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
3514  *						  the input end of a bridge
3515  * @bridge: bridge control structure
3516  * @bridge_state: new bridge state
3517  * @crtc_state: new CRTC state
3518  * @conn_state: new connector state
3519  * @output_fmt: tested output bus format
3520  * @num_input_fmts: will contain the size of the returned array
3521  *
3522  * This helper is a pluggable implementation of the
3523  * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
3524  * modify the bus configuration between their input and their output. It
3525  * returns an array of input formats with a single element set to @output_fmt.
3526  *
3527  * RETURNS:
3528  * a valid format array of size @num_input_fmts, or NULL if the allocation
3529  * failed
3530  */
3531 u32 *
3532 drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
3533 					struct drm_bridge_state *bridge_state,
3534 					struct drm_crtc_state *crtc_state,
3535 					struct drm_connector_state *conn_state,
3536 					u32 output_fmt,
3537 					unsigned int *num_input_fmts)
3538 {
3539 	u32 *input_fmts;
3540 
3541 	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
3542 	if (!input_fmts) {
3543 		*num_input_fmts = 0;
3544 		return NULL;
3545 	}
3546 
3547 	*num_input_fmts = 1;
3548 	input_fmts[0] = output_fmt;
3549 	return input_fmts;
3550 }
3551 EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
3552