xref: /linux/drivers/gpu/drm/drm_atomic_helper.c (revision 22c55fb9eb92395d999b8404d73e58540d11bdd8)
1 /*
2  * Copyright (C) 2014 Red Hat
3  * Copyright (C) 2014 Intel Corp.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  * Rob Clark <robdclark@gmail.com>
25  * Daniel Vetter <daniel.vetter@ffwll.ch>
26  */
27 
28 #include <linux/export.h>
29 #include <linux/dma-fence.h>
30 #include <linux/ktime.h>
31 
32 #include <drm/drm_atomic.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_atomic_uapi.h>
35 #include <drm/drm_blend.h>
36 #include <drm/drm_bridge.h>
37 #include <drm/drm_damage_helper.h>
38 #include <drm/drm_device.h>
39 #include <drm/drm_drv.h>
40 #include <drm/drm_framebuffer.h>
41 #include <drm/drm_gem_atomic_helper.h>
42 #include <drm/drm_panic.h>
43 #include <drm/drm_print.h>
44 #include <drm/drm_self_refresh_helper.h>
45 #include <drm/drm_vblank.h>
46 #include <drm/drm_writeback.h>
47 
48 #include "drm_crtc_helper_internal.h"
49 #include "drm_crtc_internal.h"
50 
51 /**
52  * DOC: overview
53  *
54  * This helper library provides implementations of check and commit functions on
55  * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
56  * also provides convenience implementations for the atomic state handling
57  * callbacks for drivers which don't need to subclass the drm core structures to
58  * add their own additional internal state.
59  *
60  * This library also provides default implementations for the check callback in
61  * drm_atomic_helper_check() and for the commit callback with
62  * drm_atomic_helper_commit(). But the individual stages and callbacks are
63  * exposed to allow drivers to mix and match and e.g. use the plane helpers only
64  * together with a driver private modeset implementation.
65  *
66  * This library also provides implementations for all the legacy driver
67  * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
68  * drm_atomic_helper_disable_plane(), and the various functions to implement
69  * set_property callbacks. New drivers must not implement these functions
70  * themselves but must use the provided helpers.
71  *
72  * The atomic helper uses the same function table structures as all other
73  * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
74  * struct &drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
75  * also shares the &struct drm_plane_helper_funcs function table with the plane
76  * helpers.
77  */
78 static void
79 drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
80 				struct drm_plane_state *old_plane_state,
81 				struct drm_plane_state *plane_state,
82 				struct drm_plane *plane)
83 {
84 	struct drm_crtc_state *crtc_state;
85 
86 	if (old_plane_state->crtc) {
87 		crtc_state = drm_atomic_get_new_crtc_state(state,
88 							   old_plane_state->crtc);
89 
90 		if (WARN_ON(!crtc_state))
91 			return;
92 
93 		crtc_state->planes_changed = true;
94 	}
95 
96 	if (plane_state->crtc) {
97 		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
98 
99 		if (WARN_ON(!crtc_state))
100 			return;
101 
102 		crtc_state->planes_changed = true;
103 	}
104 }
105 
106 static int handle_conflicting_encoders(struct drm_atomic_state *state,
107 				       bool disable_conflicting_encoders)
108 {
109 	struct drm_connector_state *new_conn_state;
110 	struct drm_connector *connector;
111 	struct drm_connector_list_iter conn_iter;
112 	struct drm_encoder *encoder;
113 	unsigned int encoder_mask = 0;
114 	int i, ret = 0;
115 
116 	/*
117 	 * First loop, find all newly assigned encoders from the connectors
118 	 * part of the state. If the same encoder is assigned to multiple
119 	 * connectors bail out.
120 	 */
121 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
122 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
123 		struct drm_encoder *new_encoder;
124 
125 		if (!new_conn_state->crtc)
126 			continue;
127 
128 		if (funcs->atomic_best_encoder)
129 			new_encoder = funcs->atomic_best_encoder(connector,
130 								 state);
131 		else if (funcs->best_encoder)
132 			new_encoder = funcs->best_encoder(connector);
133 		else
134 			new_encoder = drm_connector_get_single_encoder(connector);
135 
136 		if (new_encoder) {
137 			if (encoder_mask & drm_encoder_mask(new_encoder)) {
138 				drm_dbg_atomic(connector->dev,
139 					       "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
140 					       new_encoder->base.id, new_encoder->name,
141 					       connector->base.id, connector->name);
142 
143 				return -EINVAL;
144 			}
145 
146 			encoder_mask |= drm_encoder_mask(new_encoder);
147 		}
148 	}
149 
150 	if (!encoder_mask)
151 		return 0;
152 
153 	/*
154 	 * Second loop, iterate over all connectors not part of the state.
155 	 *
156 	 * If a conflicting encoder is found and disable_conflicting_encoders
157 	 * is not set, an error is returned. Userspace can provide a solution
158 	 * through the atomic ioctl.
159 	 *
160 	 * If the flag is set conflicting connectors are removed from the CRTC
161 	 * and the CRTC is disabled if no encoder is left. This preserves
162 	 * compatibility with the legacy set_config behavior.
163 	 */
164 	drm_connector_list_iter_begin(state->dev, &conn_iter);
165 	drm_for_each_connector_iter(connector, &conn_iter) {
166 		struct drm_crtc_state *crtc_state;
167 
168 		if (drm_atomic_get_new_connector_state(state, connector))
169 			continue;
170 
171 		encoder = connector->state->best_encoder;
172 		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
173 			continue;
174 
175 		if (!disable_conflicting_encoders) {
176 			drm_dbg_atomic(connector->dev,
177 				       "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
178 				       encoder->base.id, encoder->name,
179 				       connector->state->crtc->base.id,
180 				       connector->state->crtc->name,
181 				       connector->base.id, connector->name);
182 			ret = -EINVAL;
183 			goto out;
184 		}
185 
186 		new_conn_state = drm_atomic_get_connector_state(state, connector);
187 		if (IS_ERR(new_conn_state)) {
188 			ret = PTR_ERR(new_conn_state);
189 			goto out;
190 		}
191 
192 		drm_dbg_atomic(connector->dev,
193 			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
194 			       encoder->base.id, encoder->name,
195 			       new_conn_state->crtc->base.id, new_conn_state->crtc->name,
196 			       connector->base.id, connector->name);
197 
198 		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
199 
200 		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
201 		if (ret)
202 			goto out;
203 
204 		if (!crtc_state->connector_mask) {
205 			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
206 								NULL);
207 			if (ret < 0)
208 				goto out;
209 
210 			crtc_state->active = false;
211 		}
212 	}
213 out:
214 	drm_connector_list_iter_end(&conn_iter);
215 
216 	return ret;
217 }
218 
219 static void
220 set_best_encoder(struct drm_atomic_state *state,
221 		 struct drm_connector_state *conn_state,
222 		 struct drm_encoder *encoder)
223 {
224 	struct drm_crtc_state *crtc_state;
225 	struct drm_crtc *crtc;
226 
227 	if (conn_state->best_encoder) {
228 		/* Unset the encoder_mask in the old crtc state. */
229 		crtc = conn_state->connector->state->crtc;
230 
231 		/* A NULL crtc is an error here because we should have
232 		 * duplicated a NULL best_encoder when crtc was NULL.
233 		 * As an exception restoring duplicated atomic state
234 		 * during resume is allowed, so don't warn when
235 		 * best_encoder is equal to encoder we intend to set.
236 		 */
237 		WARN_ON(!crtc && encoder != conn_state->best_encoder);
238 		if (crtc) {
239 			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
240 
241 			crtc_state->encoder_mask &=
242 				~drm_encoder_mask(conn_state->best_encoder);
243 		}
244 	}
245 
246 	if (encoder) {
247 		crtc = conn_state->crtc;
248 		WARN_ON(!crtc);
249 		if (crtc) {
250 			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
251 
252 			crtc_state->encoder_mask |=
253 				drm_encoder_mask(encoder);
254 		}
255 	}
256 
257 	conn_state->best_encoder = encoder;
258 }
259 
260 static void
261 steal_encoder(struct drm_atomic_state *state,
262 	      struct drm_encoder *encoder)
263 {
264 	struct drm_crtc_state *crtc_state;
265 	struct drm_connector *connector;
266 	struct drm_connector_state *old_connector_state, *new_connector_state;
267 	int i;
268 
269 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
270 		struct drm_crtc *encoder_crtc;
271 
272 		if (new_connector_state->best_encoder != encoder)
273 			continue;
274 
275 		encoder_crtc = old_connector_state->crtc;
276 
277 		drm_dbg_atomic(encoder->dev,
278 			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
279 			       encoder->base.id, encoder->name,
280 			       encoder_crtc->base.id, encoder_crtc->name);
281 
282 		set_best_encoder(state, new_connector_state, NULL);
283 
284 		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
285 		crtc_state->connectors_changed = true;
286 
287 		return;
288 	}
289 }
290 
291 static int
292 update_connector_routing(struct drm_atomic_state *state,
293 			 struct drm_connector *connector,
294 			 struct drm_connector_state *old_connector_state,
295 			 struct drm_connector_state *new_connector_state,
296 			 bool added_by_user)
297 {
298 	const struct drm_connector_helper_funcs *funcs;
299 	struct drm_encoder *new_encoder;
300 	struct drm_crtc_state *crtc_state;
301 
302 	drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
303 		       connector->base.id, connector->name);
304 
305 	if (old_connector_state->crtc != new_connector_state->crtc) {
306 		if (old_connector_state->crtc) {
307 			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
308 			crtc_state->connectors_changed = true;
309 		}
310 
311 		if (new_connector_state->crtc) {
312 			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
313 			crtc_state->connectors_changed = true;
314 		}
315 	}
316 
317 	if (!new_connector_state->crtc) {
318 		drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
319 				connector->base.id, connector->name);
320 
321 		set_best_encoder(state, new_connector_state, NULL);
322 
323 		return 0;
324 	}
325 
326 	crtc_state = drm_atomic_get_new_crtc_state(state,
327 						   new_connector_state->crtc);
328 	/*
329 	 * For compatibility with legacy users, we want to make sure that
330 	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
331 	 * which would result in anything else must be considered invalid, to
332 	 * avoid turning on new displays on dead connectors.
333 	 *
334 	 * Since the connector can be unregistered at any point during an
335 	 * atomic check or commit, this is racy. But that's OK: all we care
336 	 * about is ensuring that userspace can't do anything but shut off the
337 	 * display on a connector that was destroyed after it's been notified,
338 	 * not before.
339 	 *
340 	 * Additionally, we also want to ignore connector registration when
341 	 * we're trying to restore an atomic state during system resume since
342 	 * there's a chance the connector may have been destroyed during the
343 	 * process, but it's better to ignore that then cause
344 	 * drm_atomic_helper_resume() to fail.
345 	 *
346 	 * Last, we want to ignore connector registration when the connector
347 	 * was not pulled in the atomic state by user-space (ie, was pulled
348 	 * in by the driver, e.g. when updating a DP-MST stream).
349 	 */
350 	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
351 	    added_by_user && crtc_state->active) {
352 		drm_dbg_atomic(connector->dev,
353 			       "[CONNECTOR:%d:%s] is not registered\n",
354 			       connector->base.id, connector->name);
355 		return -EINVAL;
356 	}
357 
358 	funcs = connector->helper_private;
359 
360 	if (funcs->atomic_best_encoder)
361 		new_encoder = funcs->atomic_best_encoder(connector, state);
362 	else if (funcs->best_encoder)
363 		new_encoder = funcs->best_encoder(connector);
364 	else
365 		new_encoder = drm_connector_get_single_encoder(connector);
366 
367 	if (!new_encoder) {
368 		drm_dbg_atomic(connector->dev,
369 			       "No suitable encoder found for [CONNECTOR:%d:%s]\n",
370 			       connector->base.id, connector->name);
371 		return -EINVAL;
372 	}
373 
374 	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
375 		drm_dbg_atomic(connector->dev,
376 			       "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
377 			       new_encoder->base.id,
378 			       new_encoder->name,
379 			       new_connector_state->crtc->base.id,
380 			       new_connector_state->crtc->name);
381 		return -EINVAL;
382 	}
383 
384 	if (new_encoder == new_connector_state->best_encoder) {
385 		set_best_encoder(state, new_connector_state, new_encoder);
386 
387 		drm_dbg_atomic(connector->dev,
388 			       "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
389 			       connector->base.id,
390 			       connector->name,
391 			       new_encoder->base.id,
392 			       new_encoder->name,
393 			       new_connector_state->crtc->base.id,
394 			       new_connector_state->crtc->name);
395 
396 		return 0;
397 	}
398 
399 	steal_encoder(state, new_encoder);
400 
401 	set_best_encoder(state, new_connector_state, new_encoder);
402 
403 	crtc_state->connectors_changed = true;
404 
405 	drm_dbg_atomic(connector->dev,
406 		       "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
407 		       connector->base.id,
408 		       connector->name,
409 		       new_encoder->base.id,
410 		       new_encoder->name,
411 		       new_connector_state->crtc->base.id,
412 		       new_connector_state->crtc->name);
413 
414 	return 0;
415 }
416 
417 static int
418 mode_fixup(struct drm_atomic_state *state)
419 {
420 	struct drm_crtc *crtc;
421 	struct drm_crtc_state *new_crtc_state;
422 	struct drm_connector *connector;
423 	struct drm_connector_state *new_conn_state;
424 	int i;
425 	int ret;
426 
427 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
428 		if (!new_crtc_state->mode_changed &&
429 		    !new_crtc_state->connectors_changed)
430 			continue;
431 
432 		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
433 	}
434 
435 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
436 		const struct drm_encoder_helper_funcs *funcs;
437 		struct drm_encoder *encoder;
438 		struct drm_bridge *bridge;
439 
440 		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
441 
442 		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
443 			continue;
444 
445 		new_crtc_state =
446 			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
447 
448 		/*
449 		 * Each encoder has at most one connector (since we always steal
450 		 * it away), so we won't call ->mode_fixup twice.
451 		 */
452 		encoder = new_conn_state->best_encoder;
453 		funcs = encoder->helper_private;
454 
455 		bridge = drm_bridge_chain_get_first_bridge(encoder);
456 		ret = drm_atomic_bridge_chain_check(bridge,
457 						    new_crtc_state,
458 						    new_conn_state);
459 		if (ret) {
460 			drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
461 			return ret;
462 		}
463 
464 		if (funcs && funcs->atomic_check) {
465 			ret = funcs->atomic_check(encoder, new_crtc_state,
466 						  new_conn_state);
467 			if (ret) {
468 				drm_dbg_atomic(encoder->dev,
469 					       "[ENCODER:%d:%s] check failed\n",
470 					       encoder->base.id, encoder->name);
471 				return ret;
472 			}
473 		} else if (funcs && funcs->mode_fixup) {
474 			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
475 						&new_crtc_state->adjusted_mode);
476 			if (!ret) {
477 				drm_dbg_atomic(encoder->dev,
478 					       "[ENCODER:%d:%s] fixup failed\n",
479 					       encoder->base.id, encoder->name);
480 				return -EINVAL;
481 			}
482 		}
483 	}
484 
485 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
486 		const struct drm_crtc_helper_funcs *funcs;
487 
488 		if (!new_crtc_state->enable)
489 			continue;
490 
491 		if (!new_crtc_state->mode_changed &&
492 		    !new_crtc_state->connectors_changed)
493 			continue;
494 
495 		funcs = crtc->helper_private;
496 		if (!funcs || !funcs->mode_fixup)
497 			continue;
498 
499 		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
500 					&new_crtc_state->adjusted_mode);
501 		if (!ret) {
502 			drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n",
503 				       crtc->base.id, crtc->name);
504 			return -EINVAL;
505 		}
506 	}
507 
508 	return 0;
509 }
510 
511 static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
512 					    struct drm_encoder *encoder,
513 					    struct drm_crtc *crtc,
514 					    const struct drm_display_mode *mode)
515 {
516 	struct drm_bridge *bridge;
517 	enum drm_mode_status ret;
518 
519 	ret = drm_encoder_mode_valid(encoder, mode);
520 	if (ret != MODE_OK) {
521 		drm_dbg_atomic(encoder->dev,
522 			       "[ENCODER:%d:%s] mode_valid() failed\n",
523 			       encoder->base.id, encoder->name);
524 		return ret;
525 	}
526 
527 	bridge = drm_bridge_chain_get_first_bridge(encoder);
528 	ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
529 					  mode);
530 	if (ret != MODE_OK) {
531 		drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
532 		return ret;
533 	}
534 
535 	ret = drm_crtc_mode_valid(crtc, mode);
536 	if (ret != MODE_OK) {
537 		drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n",
538 			       crtc->base.id, crtc->name);
539 		return ret;
540 	}
541 
542 	return ret;
543 }
544 
545 static int
546 mode_valid(struct drm_atomic_state *state)
547 {
548 	struct drm_connector_state *conn_state;
549 	struct drm_connector *connector;
550 	int i;
551 
552 	for_each_new_connector_in_state(state, connector, conn_state, i) {
553 		struct drm_encoder *encoder = conn_state->best_encoder;
554 		struct drm_crtc *crtc = conn_state->crtc;
555 		struct drm_crtc_state *crtc_state;
556 		enum drm_mode_status mode_status;
557 		const struct drm_display_mode *mode;
558 
559 		if (!crtc || !encoder)
560 			continue;
561 
562 		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
563 		if (!crtc_state)
564 			continue;
565 		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
566 			continue;
567 
568 		mode = &crtc_state->mode;
569 
570 		mode_status = mode_valid_path(connector, encoder, crtc, mode);
571 		if (mode_status != MODE_OK)
572 			return -EINVAL;
573 	}
574 
575 	return 0;
576 }
577 
578 static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
579 					 struct drm_crtc *crtc)
580 {
581 	struct drm_encoder *drm_enc;
582 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
583 									  crtc);
584 
585 	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
586 		if (!drm_enc->possible_clones) {
587 			DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
588 			continue;
589 		}
590 
591 		if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
592 		    crtc_state->encoder_mask) {
593 			DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
594 				  crtc->base.id, crtc_state->encoder_mask);
595 			return -EINVAL;
596 		}
597 	}
598 
599 	return 0;
600 }
601 
602 /**
603  * drm_atomic_helper_check_modeset - validate state object for modeset changes
604  * @dev: DRM device
605  * @state: the driver state object
606  *
607  * Check the state object to see if the requested state is physically possible.
608  * This does all the CRTC and connector related computations for an atomic
609  * update and adds any additional connectors needed for full modesets. It calls
610  * the various per-object callbacks in the follow order:
611  *
612  * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
613  * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
614  * 3. If it's determined a modeset is needed then all connectors on the affected
615  *    CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
616  * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
617  *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
618  * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
619  * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
620  *    This function is only called when the encoder will be part of a configured CRTC,
621  *    it must not be used for implementing connector property validation.
622  *    If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
623  *    instead.
624  * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
625  *
626  * &drm_crtc_state.mode_changed is set when the input mode is changed.
627  * &drm_crtc_state.connectors_changed is set when a connector is added or
628  * removed from the CRTC.  &drm_crtc_state.active_changed is set when
629  * &drm_crtc_state.active changes, which is used for DPMS.
630  * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
631  * See also: drm_atomic_crtc_needs_modeset()
632  *
633  * IMPORTANT:
634  *
635  * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
636  * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
637  * without a full modeset) _must_ call this function after that change. It is
638  * permitted to call this function multiple times for the same update, e.g.
639  * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
640  * adjusted dotclock for fifo space allocation and watermark computation.
641  *
642  * RETURNS:
643  * Zero for success or -errno
644  */
645 int
646 drm_atomic_helper_check_modeset(struct drm_device *dev,
647 				struct drm_atomic_state *state)
648 {
649 	struct drm_crtc *crtc;
650 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
651 	struct drm_connector *connector;
652 	struct drm_connector_state *old_connector_state, *new_connector_state;
653 	int i, ret;
654 	unsigned int connectors_mask = 0, user_connectors_mask = 0;
655 
656 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
657 		user_connectors_mask |= BIT(i);
658 
659 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
660 		bool has_connectors =
661 			!!new_crtc_state->connector_mask;
662 
663 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
664 
665 		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
666 			drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n",
667 				       crtc->base.id, crtc->name);
668 			new_crtc_state->mode_changed = true;
669 		}
670 
671 		if (old_crtc_state->enable != new_crtc_state->enable) {
672 			drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n",
673 				       crtc->base.id, crtc->name);
674 
675 			/*
676 			 * For clarity this assignment is done here, but
677 			 * enable == 0 is only true when there are no
678 			 * connectors and a NULL mode.
679 			 *
680 			 * The other way around is true as well. enable != 0
681 			 * implies that connectors are attached and a mode is set.
682 			 */
683 			new_crtc_state->mode_changed = true;
684 			new_crtc_state->connectors_changed = true;
685 		}
686 
687 		if (old_crtc_state->active != new_crtc_state->active) {
688 			drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n",
689 				       crtc->base.id, crtc->name);
690 			new_crtc_state->active_changed = true;
691 		}
692 
693 		if (new_crtc_state->enable != has_connectors) {
694 			drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch (%d/%d)\n",
695 				       crtc->base.id, crtc->name,
696 				       new_crtc_state->enable, has_connectors);
697 
698 			return -EINVAL;
699 		}
700 
701 		if (drm_dev_has_vblank(dev))
702 			new_crtc_state->no_vblank = false;
703 		else
704 			new_crtc_state->no_vblank = true;
705 	}
706 
707 	ret = handle_conflicting_encoders(state, false);
708 	if (ret)
709 		return ret;
710 
711 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
712 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
713 
714 		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
715 
716 		/*
717 		 * This only sets crtc->connectors_changed for routing changes,
718 		 * drivers must set crtc->connectors_changed themselves when
719 		 * connector properties need to be updated.
720 		 */
721 		ret = update_connector_routing(state, connector,
722 					       old_connector_state,
723 					       new_connector_state,
724 					       BIT(i) & user_connectors_mask);
725 		if (ret)
726 			return ret;
727 		if (old_connector_state->crtc) {
728 			new_crtc_state = drm_atomic_get_new_crtc_state(state,
729 								       old_connector_state->crtc);
730 			if (old_connector_state->link_status !=
731 			    new_connector_state->link_status)
732 				new_crtc_state->connectors_changed = true;
733 
734 			if (old_connector_state->max_requested_bpc !=
735 			    new_connector_state->max_requested_bpc)
736 				new_crtc_state->connectors_changed = true;
737 		}
738 
739 		if (funcs->atomic_check)
740 			ret = funcs->atomic_check(connector, state);
741 		if (ret) {
742 			drm_dbg_atomic(dev,
743 				       "[CONNECTOR:%d:%s] driver check failed\n",
744 				       connector->base.id, connector->name);
745 			return ret;
746 		}
747 
748 		connectors_mask |= BIT(i);
749 	}
750 
751 	/*
752 	 * After all the routing has been prepared we need to add in any
753 	 * connector which is itself unchanged, but whose CRTC changes its
754 	 * configuration. This must be done before calling mode_fixup in case a
755 	 * crtc only changed its mode but has the same set of connectors.
756 	 */
757 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
758 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
759 			continue;
760 
761 		drm_dbg_atomic(dev,
762 			       "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
763 			       crtc->base.id, crtc->name,
764 			       new_crtc_state->enable ? 'y' : 'n',
765 			       new_crtc_state->active ? 'y' : 'n');
766 
767 		ret = drm_atomic_add_affected_connectors(state, crtc);
768 		if (ret != 0)
769 			return ret;
770 
771 		ret = drm_atomic_add_affected_planes(state, crtc);
772 		if (ret != 0)
773 			return ret;
774 
775 		ret = drm_atomic_check_valid_clones(state, crtc);
776 		if (ret != 0)
777 			return ret;
778 	}
779 
780 	/*
781 	 * Iterate over all connectors again, to make sure atomic_check()
782 	 * has been called on them when a modeset is forced.
783 	 */
784 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
785 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
786 
787 		if (connectors_mask & BIT(i))
788 			continue;
789 
790 		if (funcs->atomic_check)
791 			ret = funcs->atomic_check(connector, state);
792 		if (ret) {
793 			drm_dbg_atomic(dev,
794 				       "[CONNECTOR:%d:%s] driver check failed\n",
795 				       connector->base.id, connector->name);
796 			return ret;
797 		}
798 	}
799 
800 	/*
801 	 * Iterate over all connectors again, and add all affected bridges to
802 	 * the state.
803 	 */
804 	for_each_oldnew_connector_in_state(state, connector,
805 					   old_connector_state,
806 					   new_connector_state, i) {
807 		struct drm_encoder *encoder;
808 
809 		encoder = old_connector_state->best_encoder;
810 		ret = drm_atomic_add_encoder_bridges(state, encoder);
811 		if (ret)
812 			return ret;
813 
814 		encoder = new_connector_state->best_encoder;
815 		ret = drm_atomic_add_encoder_bridges(state, encoder);
816 		if (ret)
817 			return ret;
818 	}
819 
820 	ret = mode_valid(state);
821 	if (ret)
822 		return ret;
823 
824 	return mode_fixup(state);
825 }
826 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
827 
828 /**
829  * drm_atomic_helper_check_wb_connector_state() - Check writeback connector state
830  * @connector: corresponding connector
831  * @state: the driver state object
832  *
833  * Checks if the writeback connector state is valid, and returns an error if it
834  * isn't.
835  *
836  * RETURNS:
837  * Zero for success or -errno
838  */
839 int
840 drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
841 					   struct drm_atomic_state *state)
842 {
843 	struct drm_connector_state *conn_state =
844 		drm_atomic_get_new_connector_state(state, connector);
845 	struct drm_writeback_job *wb_job = conn_state->writeback_job;
846 	struct drm_property_blob *pixel_format_blob;
847 	struct drm_framebuffer *fb;
848 	size_t i, nformats;
849 	u32 *formats;
850 
851 	if (!wb_job || !wb_job->fb)
852 		return 0;
853 
854 	pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr;
855 	nformats = pixel_format_blob->length / sizeof(u32);
856 	formats = pixel_format_blob->data;
857 	fb = wb_job->fb;
858 
859 	for (i = 0; i < nformats; i++)
860 		if (fb->format->format == formats[i])
861 			return 0;
862 
863 	drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
864 
865 	return -EINVAL;
866 }
867 EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state);
868 
869 /**
870  * drm_atomic_helper_check_plane_state() - Check plane state for validity
871  * @plane_state: plane state to check
872  * @crtc_state: CRTC state to check
873  * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
874  * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
875  * @can_position: is it legal to position the plane such that it
876  *                doesn't cover the entire CRTC?  This will generally
877  *                only be false for primary planes.
878  * @can_update_disabled: can the plane be updated while the CRTC
879  *                       is disabled?
880  *
881  * Checks that a desired plane update is valid, and updates various
882  * bits of derived state (clipped coordinates etc.). Drivers that provide
883  * their own plane handling rather than helper-provided implementations may
884  * still wish to call this function to avoid duplication of error checking
885  * code.
886  *
887  * RETURNS:
888  * Zero if update appears valid, error code on failure
889  */
890 int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
891 					const struct drm_crtc_state *crtc_state,
892 					int min_scale,
893 					int max_scale,
894 					bool can_position,
895 					bool can_update_disabled)
896 {
897 	struct drm_framebuffer *fb = plane_state->fb;
898 	struct drm_rect *src = &plane_state->src;
899 	struct drm_rect *dst = &plane_state->dst;
900 	unsigned int rotation = plane_state->rotation;
901 	struct drm_rect clip = {};
902 	int hscale, vscale;
903 
904 	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
905 
906 	*src = drm_plane_state_src(plane_state);
907 	*dst = drm_plane_state_dest(plane_state);
908 
909 	if (!fb) {
910 		plane_state->visible = false;
911 		return 0;
912 	}
913 
914 	/* crtc should only be NULL when disabling (i.e., !fb) */
915 	if (WARN_ON(!plane_state->crtc)) {
916 		plane_state->visible = false;
917 		return 0;
918 	}
919 
920 	if (!crtc_state->enable && !can_update_disabled) {
921 		drm_dbg_kms(plane_state->plane->dev,
922 			    "Cannot update plane of a disabled CRTC.\n");
923 		return -EINVAL;
924 	}
925 
926 	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
927 
928 	/* Check scaling */
929 	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
930 	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
931 	if (hscale < 0 || vscale < 0) {
932 		drm_dbg_kms(plane_state->plane->dev,
933 			    "Invalid scaling of plane\n");
934 		drm_rect_debug_print("src: ", &plane_state->src, true);
935 		drm_rect_debug_print("dst: ", &plane_state->dst, false);
936 		return -ERANGE;
937 	}
938 
939 	if (crtc_state->enable)
940 		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
941 
942 	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
943 
944 	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
945 
946 	if (!plane_state->visible)
947 		/*
948 		 * Plane isn't visible; some drivers can handle this
949 		 * so we just return success here.  Drivers that can't
950 		 * (including those that use the primary plane helper's
951 		 * update function) will return an error from their
952 		 * update_plane handler.
953 		 */
954 		return 0;
955 
956 	if (!can_position && !drm_rect_equals(dst, &clip)) {
957 		drm_dbg_kms(plane_state->plane->dev,
958 			    "Plane must cover entire CRTC\n");
959 		drm_rect_debug_print("dst: ", dst, false);
960 		drm_rect_debug_print("clip: ", &clip, false);
961 		return -EINVAL;
962 	}
963 
964 	return 0;
965 }
966 EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
967 
968 /**
969  * drm_atomic_helper_check_crtc_primary_plane() - Check CRTC state for primary plane
970  * @crtc_state: CRTC state to check
971  *
972  * Checks that a CRTC has at least one primary plane attached to it, which is
973  * a requirement on some hardware. Note that this only involves the CRTC side
974  * of the test. To test if the primary plane is visible or if it can be updated
975  * without the CRTC being enabled, use drm_atomic_helper_check_plane_state() in
976  * the plane's atomic check.
977  *
978  * RETURNS:
979  * 0 if a primary plane is attached to the CRTC, or an error code otherwise
980  */
981 int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state)
982 {
983 	struct drm_crtc *crtc = crtc_state->crtc;
984 	struct drm_device *dev = crtc->dev;
985 	struct drm_plane *plane;
986 
987 	/* needs at least one primary plane to be enabled */
988 	drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
989 		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
990 			return 0;
991 	}
992 
993 	drm_dbg_atomic(dev, "[CRTC:%d:%s] primary plane missing\n", crtc->base.id, crtc->name);
994 
995 	return -EINVAL;
996 }
997 EXPORT_SYMBOL(drm_atomic_helper_check_crtc_primary_plane);
998 
999 /**
1000  * drm_atomic_helper_check_planes - validate state object for planes changes
1001  * @dev: DRM device
1002  * @state: the driver state object
1003  *
1004  * Check the state object to see if the requested state is physically possible.
1005  * This does all the plane update related checks using by calling into the
1006  * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
1007  * hooks provided by the driver.
1008  *
1009  * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
1010  * updated planes.
1011  *
1012  * RETURNS:
1013  * Zero for success or -errno
1014  */
1015 int
1016 drm_atomic_helper_check_planes(struct drm_device *dev,
1017 			       struct drm_atomic_state *state)
1018 {
1019 	struct drm_crtc *crtc;
1020 	struct drm_crtc_state *new_crtc_state;
1021 	struct drm_plane *plane;
1022 	struct drm_plane_state *new_plane_state, *old_plane_state;
1023 	int i, ret = 0;
1024 
1025 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
1026 		const struct drm_plane_helper_funcs *funcs;
1027 
1028 		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
1029 
1030 		funcs = plane->helper_private;
1031 
1032 		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
1033 
1034 		drm_atomic_helper_check_plane_damage(state, new_plane_state);
1035 
1036 		if (!funcs || !funcs->atomic_check)
1037 			continue;
1038 
1039 		ret = funcs->atomic_check(plane, state);
1040 		if (ret) {
1041 			drm_dbg_atomic(plane->dev,
1042 				       "[PLANE:%d:%s] atomic driver check failed\n",
1043 				       plane->base.id, plane->name);
1044 			return ret;
1045 		}
1046 	}
1047 
1048 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1049 		const struct drm_crtc_helper_funcs *funcs;
1050 
1051 		funcs = crtc->helper_private;
1052 
1053 		if (!funcs || !funcs->atomic_check)
1054 			continue;
1055 
1056 		ret = funcs->atomic_check(crtc, state);
1057 		if (ret) {
1058 			drm_dbg_atomic(crtc->dev,
1059 				       "[CRTC:%d:%s] atomic driver check failed\n",
1060 				       crtc->base.id, crtc->name);
1061 			return ret;
1062 		}
1063 	}
1064 
1065 	return ret;
1066 }
1067 EXPORT_SYMBOL(drm_atomic_helper_check_planes);
1068 
1069 /**
1070  * drm_atomic_helper_check - validate state object
1071  * @dev: DRM device
1072  * @state: the driver state object
1073  *
1074  * Check the state object to see if the requested state is physically possible.
1075  * Only CRTCs and planes have check callbacks, so for any additional (global)
1076  * checking that a driver needs it can simply wrap that around this function.
1077  * Drivers without such needs can directly use this as their
1078  * &drm_mode_config_funcs.atomic_check callback.
1079  *
1080  * This just wraps the two parts of the state checking for planes and modeset
1081  * state in the default order: First it calls drm_atomic_helper_check_modeset()
1082  * and then drm_atomic_helper_check_planes(). The assumption is that the
1083  * @drm_plane_helper_funcs.atomic_check and @drm_crtc_helper_funcs.atomic_check
1084  * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
1085  * watermarks.
1086  *
1087  * Note that zpos normalization will add all enable planes to the state which
1088  * might not desired for some drivers.
1089  * For example enable/disable of a cursor plane which have fixed zpos value
1090  * would trigger all other enabled planes to be forced to the state change.
1091  *
1092  * IMPORTANT:
1093  *
1094  * As this function calls drm_atomic_helper_check_modeset() internally, its
1095  * restrictions also apply:
1096  * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
1097  * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
1098  * without a full modeset) _must_ call drm_atomic_helper_check_modeset()
1099  * function again after that change.
1100  *
1101  * RETURNS:
1102  * Zero for success or -errno
1103  */
1104 int drm_atomic_helper_check(struct drm_device *dev,
1105 			    struct drm_atomic_state *state)
1106 {
1107 	int ret;
1108 
1109 	ret = drm_atomic_helper_check_modeset(dev, state);
1110 	if (ret)
1111 		return ret;
1112 
1113 	if (dev->mode_config.normalize_zpos) {
1114 		ret = drm_atomic_normalize_zpos(dev, state);
1115 		if (ret)
1116 			return ret;
1117 	}
1118 
1119 	ret = drm_atomic_helper_check_planes(dev, state);
1120 	if (ret)
1121 		return ret;
1122 
1123 	if (state->legacy_cursor_update)
1124 		state->async_update = !drm_atomic_helper_async_check(dev, state);
1125 
1126 	drm_self_refresh_helper_alter_state(state);
1127 
1128 	return ret;
1129 }
1130 EXPORT_SYMBOL(drm_atomic_helper_check);
1131 
1132 static bool
1133 crtc_needs_disable(struct drm_crtc_state *old_state,
1134 		   struct drm_crtc_state *new_state)
1135 {
1136 	/*
1137 	 * No new_state means the CRTC is off, so the only criteria is whether
1138 	 * it's currently active or in self refresh mode.
1139 	 */
1140 	if (!new_state)
1141 		return drm_atomic_crtc_effectively_active(old_state);
1142 
1143 	/*
1144 	 * We need to disable bridge(s) and CRTC if we're transitioning out of
1145 	 * self-refresh and changing CRTCs at the same time, because the
1146 	 * bridge tracks self-refresh status via CRTC state.
1147 	 */
1148 	if (old_state->self_refresh_active &&
1149 	    old_state->crtc != new_state->crtc)
1150 		return true;
1151 
1152 	/*
1153 	 * We also need to run through the crtc_funcs->disable() function if
1154 	 * the CRTC is currently on, if it's transitioning to self refresh
1155 	 * mode, or if it's in self refresh mode and needs to be fully
1156 	 * disabled.
1157 	 */
1158 	return old_state->active ||
1159 	       (old_state->self_refresh_active && !new_state->active) ||
1160 	       new_state->self_refresh_active;
1161 }
1162 
1163 static void
1164 encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
1165 {
1166 	struct drm_connector *connector;
1167 	struct drm_connector_state *old_conn_state, *new_conn_state;
1168 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1169 	int i;
1170 
1171 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
1172 		const struct drm_encoder_helper_funcs *funcs;
1173 		struct drm_encoder *encoder;
1174 		struct drm_bridge *bridge;
1175 
1176 		/*
1177 		 * Shut down everything that's in the changeset and currently
1178 		 * still on. So need to check the old, saved state.
1179 		 */
1180 		if (!old_conn_state->crtc)
1181 			continue;
1182 
1183 		old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
1184 
1185 		if (new_conn_state->crtc)
1186 			new_crtc_state = drm_atomic_get_new_crtc_state(
1187 						state,
1188 						new_conn_state->crtc);
1189 		else
1190 			new_crtc_state = NULL;
1191 
1192 		if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1193 		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1194 			continue;
1195 
1196 		encoder = old_conn_state->best_encoder;
1197 
1198 		/* We shouldn't get this far if we didn't previously have
1199 		 * an encoder.. but WARN_ON() rather than explode.
1200 		 */
1201 		if (WARN_ON(!encoder))
1202 			continue;
1203 
1204 		funcs = encoder->helper_private;
1205 
1206 		drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n",
1207 			       encoder->base.id, encoder->name);
1208 
1209 		/*
1210 		 * Each encoder has at most one connector (since we always steal
1211 		 * it away), so we won't call disable hooks twice.
1212 		 */
1213 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1214 		drm_atomic_bridge_chain_disable(bridge, state);
1215 
1216 		/* Right function depends upon target state. */
1217 		if (funcs) {
1218 			if (funcs->atomic_disable)
1219 				funcs->atomic_disable(encoder, state);
1220 			else if (new_conn_state->crtc && funcs->prepare)
1221 				funcs->prepare(encoder);
1222 			else if (funcs->disable)
1223 				funcs->disable(encoder);
1224 			else if (funcs->dpms)
1225 				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
1226 		}
1227 	}
1228 }
1229 
1230 static void
1231 crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
1232 {
1233 	struct drm_crtc *crtc;
1234 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1235 	int i;
1236 
1237 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1238 		const struct drm_crtc_helper_funcs *funcs;
1239 		int ret;
1240 
1241 		/* Shut down everything that needs a full modeset. */
1242 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1243 			continue;
1244 
1245 		if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
1246 			continue;
1247 
1248 		funcs = crtc->helper_private;
1249 
1250 		drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
1251 			       crtc->base.id, crtc->name);
1252 
1253 
1254 		/* Right function depends upon target state. */
1255 		if (new_crtc_state->enable && funcs->prepare)
1256 			funcs->prepare(crtc);
1257 		else if (funcs->atomic_disable)
1258 			funcs->atomic_disable(crtc, state);
1259 		else if (funcs->disable)
1260 			funcs->disable(crtc);
1261 		else if (funcs->dpms)
1262 			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
1263 
1264 		if (!drm_dev_has_vblank(dev))
1265 			continue;
1266 
1267 		ret = drm_crtc_vblank_get(crtc);
1268 		/*
1269 		 * Self-refresh is not a true "disable"; ensure vblank remains
1270 		 * enabled.
1271 		 */
1272 		if (new_crtc_state->self_refresh_active)
1273 			WARN_ONCE(ret != 0,
1274 				  "driver disabled vblank in self-refresh\n");
1275 		else
1276 			WARN_ONCE(ret != -EINVAL,
1277 				  "driver forgot to call drm_crtc_vblank_off()\n");
1278 		if (ret == 0)
1279 			drm_crtc_vblank_put(crtc);
1280 	}
1281 }
1282 
1283 static void
1284 encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
1285 {
1286 	struct drm_connector *connector;
1287 	struct drm_connector_state *old_conn_state, *new_conn_state;
1288 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1289 	int i;
1290 
1291 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
1292 		struct drm_encoder *encoder;
1293 		struct drm_bridge *bridge;
1294 
1295 		/*
1296 		 * Shut down everything that's in the changeset and currently
1297 		 * still on. So need to check the old, saved state.
1298 		 */
1299 		if (!old_conn_state->crtc)
1300 			continue;
1301 
1302 		old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
1303 
1304 		if (new_conn_state->crtc)
1305 			new_crtc_state = drm_atomic_get_new_crtc_state(state,
1306 								       new_conn_state->crtc);
1307 		else
1308 			new_crtc_state = NULL;
1309 
1310 		if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
1311 		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
1312 			continue;
1313 
1314 		encoder = old_conn_state->best_encoder;
1315 
1316 		/*
1317 		 * We shouldn't get this far if we didn't previously have
1318 		 * an encoder.. but WARN_ON() rather than explode.
1319 		 */
1320 		if (WARN_ON(!encoder))
1321 			continue;
1322 
1323 		drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n",
1324 			       encoder->base.id, encoder->name);
1325 
1326 		/*
1327 		 * Each encoder has at most one connector (since we always steal
1328 		 * it away), so we won't call disable hooks twice.
1329 		 */
1330 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1331 		drm_atomic_bridge_chain_post_disable(bridge, state);
1332 	}
1333 }
1334 
1335 static void
1336 disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
1337 {
1338 	encoder_bridge_disable(dev, state);
1339 
1340 	crtc_disable(dev, state);
1341 
1342 	encoder_bridge_post_disable(dev, state);
1343 }
1344 
1345 /**
1346  * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
1347  * @dev: DRM device
1348  * @state: atomic state object being committed
1349  *
1350  * This function updates all the various legacy modeset state pointers in
1351  * connectors, encoders and CRTCs.
1352  *
1353  * Drivers can use this for building their own atomic commit if they don't have
1354  * a pure helper-based modeset implementation.
1355  *
1356  * Since these updates are not synchronized with lockings, only code paths
1357  * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
1358  * legacy state filled out by this helper. Defacto this means this helper and
1359  * the legacy state pointers are only really useful for transitioning an
1360  * existing driver to the atomic world.
1361  */
1362 void
1363 drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
1364 					      struct drm_atomic_state *state)
1365 {
1366 	struct drm_connector *connector;
1367 	struct drm_connector_state *old_conn_state, *new_conn_state;
1368 	struct drm_crtc *crtc;
1369 	struct drm_crtc_state *new_crtc_state;
1370 	int i;
1371 
1372 	/* clear out existing links and update dpms */
1373 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
1374 		if (connector->encoder) {
1375 			WARN_ON(!connector->encoder->crtc);
1376 
1377 			connector->encoder->crtc = NULL;
1378 			connector->encoder = NULL;
1379 		}
1380 
1381 		crtc = new_conn_state->crtc;
1382 		if ((!crtc && old_conn_state->crtc) ||
1383 		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
1384 			int mode = DRM_MODE_DPMS_OFF;
1385 
1386 			if (crtc && crtc->state->active)
1387 				mode = DRM_MODE_DPMS_ON;
1388 
1389 			connector->dpms = mode;
1390 		}
1391 	}
1392 
1393 	/* set new links */
1394 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1395 		if (!new_conn_state->crtc)
1396 			continue;
1397 
1398 		if (WARN_ON(!new_conn_state->best_encoder))
1399 			continue;
1400 
1401 		connector->encoder = new_conn_state->best_encoder;
1402 		connector->encoder->crtc = new_conn_state->crtc;
1403 	}
1404 
1405 	/* set legacy state in the crtc structure */
1406 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1407 		struct drm_plane *primary = crtc->primary;
1408 		struct drm_plane_state *new_plane_state;
1409 
1410 		crtc->mode = new_crtc_state->mode;
1411 		crtc->enabled = new_crtc_state->enable;
1412 
1413 		new_plane_state =
1414 			drm_atomic_get_new_plane_state(state, primary);
1415 
1416 		if (new_plane_state && new_plane_state->crtc == crtc) {
1417 			crtc->x = new_plane_state->src_x >> 16;
1418 			crtc->y = new_plane_state->src_y >> 16;
1419 		}
1420 	}
1421 }
1422 EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
1423 
1424 /**
1425  * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
1426  * @state: atomic state object
1427  *
1428  * Updates the timestamping constants used for precise vblank timestamps
1429  * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
1430  */
1431 void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
1432 {
1433 	struct drm_crtc_state *new_crtc_state;
1434 	struct drm_crtc *crtc;
1435 	int i;
1436 
1437 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1438 		if (new_crtc_state->enable)
1439 			drm_calc_timestamping_constants(crtc,
1440 							&new_crtc_state->adjusted_mode);
1441 	}
1442 }
1443 EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
1444 
1445 static void
1446 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
1447 {
1448 	struct drm_crtc *crtc;
1449 	struct drm_crtc_state *new_crtc_state;
1450 	struct drm_connector *connector;
1451 	struct drm_connector_state *new_conn_state;
1452 	int i;
1453 
1454 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
1455 		const struct drm_crtc_helper_funcs *funcs;
1456 
1457 		if (!new_crtc_state->mode_changed)
1458 			continue;
1459 
1460 		funcs = crtc->helper_private;
1461 
1462 		if (new_crtc_state->enable && funcs->mode_set_nofb) {
1463 			drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n",
1464 				       crtc->base.id, crtc->name);
1465 
1466 			funcs->mode_set_nofb(crtc);
1467 		}
1468 	}
1469 
1470 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1471 		const struct drm_encoder_helper_funcs *funcs;
1472 		struct drm_encoder *encoder;
1473 		struct drm_display_mode *mode, *adjusted_mode;
1474 		struct drm_bridge *bridge;
1475 
1476 		if (!new_conn_state->best_encoder)
1477 			continue;
1478 
1479 		encoder = new_conn_state->best_encoder;
1480 		funcs = encoder->helper_private;
1481 		new_crtc_state = new_conn_state->crtc->state;
1482 		mode = &new_crtc_state->mode;
1483 		adjusted_mode = &new_crtc_state->adjusted_mode;
1484 
1485 		if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
1486 			continue;
1487 
1488 		drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
1489 			       encoder->base.id, encoder->name);
1490 
1491 		/*
1492 		 * Each encoder has at most one connector (since we always steal
1493 		 * it away), so we won't call mode_set hooks twice.
1494 		 */
1495 		if (funcs && funcs->atomic_mode_set) {
1496 			funcs->atomic_mode_set(encoder, new_crtc_state,
1497 					       new_conn_state);
1498 		} else if (funcs && funcs->mode_set) {
1499 			funcs->mode_set(encoder, mode, adjusted_mode);
1500 		}
1501 
1502 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1503 		drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
1504 	}
1505 }
1506 
1507 /**
1508  * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
1509  * @dev: DRM device
1510  * @state: atomic state object being committed
1511  *
1512  * This function shuts down all the outputs that need to be shut down and
1513  * prepares them (if required) with the new mode.
1514  *
1515  * For compatibility with legacy CRTC helpers this should be called before
1516  * drm_atomic_helper_commit_planes(), which is what the default commit function
1517  * does. But drivers with different needs can group the modeset commits together
1518  * and do the plane commits at the end. This is useful for drivers doing runtime
1519  * PM since planes updates then only happen when the CRTC is actually enabled.
1520  */
1521 void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
1522 					       struct drm_atomic_state *state)
1523 {
1524 	disable_outputs(dev, state);
1525 
1526 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
1527 	drm_atomic_helper_calc_timestamping_constants(state);
1528 
1529 	crtc_set_mode(dev, state);
1530 }
1531 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
1532 
1533 static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
1534 						struct drm_atomic_state *state)
1535 {
1536 	struct drm_connector *connector;
1537 	struct drm_connector_state *new_conn_state;
1538 	int i;
1539 
1540 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1541 		const struct drm_connector_helper_funcs *funcs;
1542 
1543 		funcs = connector->helper_private;
1544 		if (!funcs->atomic_commit)
1545 			continue;
1546 
1547 		if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
1548 			WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
1549 			funcs->atomic_commit(connector, state);
1550 		}
1551 	}
1552 }
1553 
1554 static void
1555 encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
1556 {
1557 	struct drm_connector *connector;
1558 	struct drm_connector_state *new_conn_state;
1559 	int i;
1560 
1561 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1562 		struct drm_encoder *encoder;
1563 		struct drm_bridge *bridge;
1564 
1565 		if (!new_conn_state->best_encoder)
1566 			continue;
1567 
1568 		if (!new_conn_state->crtc->state->active ||
1569 		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1570 			continue;
1571 
1572 		encoder = new_conn_state->best_encoder;
1573 
1574 		drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n",
1575 			       encoder->base.id, encoder->name);
1576 
1577 		/*
1578 		 * Each encoder has at most one connector (since we always steal
1579 		 * it away), so we won't call enable hooks twice.
1580 		 */
1581 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1582 		drm_atomic_bridge_chain_pre_enable(bridge, state);
1583 	}
1584 }
1585 
1586 static void
1587 crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
1588 {
1589 	struct drm_crtc *crtc;
1590 	struct drm_crtc_state *old_crtc_state;
1591 	struct drm_crtc_state *new_crtc_state;
1592 	int i;
1593 
1594 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1595 		const struct drm_crtc_helper_funcs *funcs;
1596 
1597 		/* Need to filter out CRTCs where only planes change. */
1598 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
1599 			continue;
1600 
1601 		if (!new_crtc_state->active)
1602 			continue;
1603 
1604 		funcs = crtc->helper_private;
1605 
1606 		if (new_crtc_state->enable) {
1607 			drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
1608 				       crtc->base.id, crtc->name);
1609 			if (funcs->atomic_enable)
1610 				funcs->atomic_enable(crtc, state);
1611 			else if (funcs->commit)
1612 				funcs->commit(crtc);
1613 		}
1614 	}
1615 }
1616 
1617 static void
1618 encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
1619 {
1620 	struct drm_connector *connector;
1621 	struct drm_connector_state *new_conn_state;
1622 	int i;
1623 
1624 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
1625 		const struct drm_encoder_helper_funcs *funcs;
1626 		struct drm_encoder *encoder;
1627 		struct drm_bridge *bridge;
1628 
1629 		if (!new_conn_state->best_encoder)
1630 			continue;
1631 
1632 		if (!new_conn_state->crtc->state->active ||
1633 		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
1634 			continue;
1635 
1636 		encoder = new_conn_state->best_encoder;
1637 		funcs = encoder->helper_private;
1638 
1639 		drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n",
1640 			       encoder->base.id, encoder->name);
1641 
1642 		/*
1643 		 * Each encoder has at most one connector (since we always steal
1644 		 * it away), so we won't call enable hooks twice.
1645 		 */
1646 		bridge = drm_bridge_chain_get_first_bridge(encoder);
1647 
1648 		if (funcs) {
1649 			if (funcs->atomic_enable)
1650 				funcs->atomic_enable(encoder, state);
1651 			else if (funcs->enable)
1652 				funcs->enable(encoder);
1653 			else if (funcs->commit)
1654 				funcs->commit(encoder);
1655 		}
1656 
1657 		drm_atomic_bridge_chain_enable(bridge, state);
1658 	}
1659 }
1660 
1661 /**
1662  * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
1663  * @dev: DRM device
1664  * @state: atomic state object being committed
1665  *
1666  * This function enables all the outputs with the new configuration which had to
1667  * be turned off for the update.
1668  *
1669  * For compatibility with legacy CRTC helpers this should be called after
1670  * drm_atomic_helper_commit_planes(), which is what the default commit function
1671  * does. But drivers with different needs can group the modeset commits together
1672  * and do the plane commits at the end. This is useful for drivers doing runtime
1673  * PM since planes updates then only happen when the CRTC is actually enabled.
1674  */
1675 void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
1676 					      struct drm_atomic_state *state)
1677 {
1678 	encoder_bridge_pre_enable(dev, state);
1679 
1680 	crtc_enable(dev, state);
1681 
1682 	encoder_bridge_enable(dev, state);
1683 
1684 	drm_atomic_helper_commit_writebacks(dev, state);
1685 }
1686 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
1687 
1688 /*
1689  * For atomic updates which touch just a single CRTC, calculate the time of the
1690  * next vblank, and inform all the fences of the deadline.
1691  */
1692 static void set_fence_deadline(struct drm_device *dev,
1693 			       struct drm_atomic_state *state)
1694 {
1695 	struct drm_crtc *crtc;
1696 	struct drm_crtc_state *new_crtc_state;
1697 	struct drm_plane *plane;
1698 	struct drm_plane_state *new_plane_state;
1699 	ktime_t vbltime = 0;
1700 	int i;
1701 
1702 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
1703 		ktime_t v;
1704 
1705 		if (drm_atomic_crtc_needs_modeset(new_crtc_state))
1706 			continue;
1707 
1708 		if (!new_crtc_state->active)
1709 			continue;
1710 
1711 		if (drm_crtc_next_vblank_start(crtc, &v))
1712 			continue;
1713 
1714 		if (!vbltime || ktime_before(v, vbltime))
1715 			vbltime = v;
1716 	}
1717 
1718 	/* If no CRTCs updated, then nothing to do: */
1719 	if (!vbltime)
1720 		return;
1721 
1722 	for_each_new_plane_in_state (state, plane, new_plane_state, i) {
1723 		if (!new_plane_state->fence)
1724 			continue;
1725 		dma_fence_set_deadline(new_plane_state->fence, vbltime);
1726 	}
1727 }
1728 
1729 /**
1730  * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
1731  * @dev: DRM device
1732  * @state: atomic state object with old state structures
1733  * @pre_swap: If true, do an interruptible wait, and @state is the new state.
1734  *	Otherwise @state is the old state.
1735  *
1736  * For implicit sync, driver should fish the exclusive fence out from the
1737  * incoming fb's and stash it in the drm_plane_state.  This is called after
1738  * drm_atomic_helper_swap_state() so it uses the current plane state (and
1739  * just uses the atomic state to find the changed planes)
1740  *
1741  * Note that @pre_swap is needed since the point where we block for fences moves
1742  * around depending upon whether an atomic commit is blocking or
1743  * non-blocking. For non-blocking commit all waiting needs to happen after
1744  * drm_atomic_helper_swap_state() is called, but for blocking commits we want
1745  * to wait **before** we do anything that can't be easily rolled back. That is
1746  * before we call drm_atomic_helper_swap_state().
1747  *
1748  * Returns zero if success or < 0 if dma_fence_wait() fails.
1749  */
1750 int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
1751 				      struct drm_atomic_state *state,
1752 				      bool pre_swap)
1753 {
1754 	struct drm_plane *plane;
1755 	struct drm_plane_state *new_plane_state;
1756 	int i, ret;
1757 
1758 	set_fence_deadline(dev, state);
1759 
1760 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
1761 		if (!new_plane_state->fence)
1762 			continue;
1763 
1764 		WARN_ON(!new_plane_state->fb);
1765 
1766 		/*
1767 		 * If waiting for fences pre-swap (ie: nonblock), userspace can
1768 		 * still interrupt the operation. Instead of blocking until the
1769 		 * timer expires, make the wait interruptible.
1770 		 */
1771 		ret = dma_fence_wait(new_plane_state->fence, pre_swap);
1772 		if (ret)
1773 			return ret;
1774 
1775 		dma_fence_put(new_plane_state->fence);
1776 		new_plane_state->fence = NULL;
1777 	}
1778 
1779 	return 0;
1780 }
1781 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
1782 
1783 /**
1784  * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
1785  * @dev: DRM device
1786  * @state: atomic state object being committed
1787  *
1788  * Helper to, after atomic commit, wait for vblanks on all affected
1789  * CRTCs (ie. before cleaning up old framebuffers using
1790  * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
1791  * framebuffers have actually changed to optimize for the legacy cursor and
1792  * plane update use-case.
1793  *
1794  * Drivers using the nonblocking commit tracking support initialized by calling
1795  * drm_atomic_helper_setup_commit() should look at
1796  * drm_atomic_helper_wait_for_flip_done() as an alternative.
1797  */
1798 void
1799 drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1800 				   struct drm_atomic_state *state)
1801 {
1802 	struct drm_crtc *crtc;
1803 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1804 	int i, ret;
1805 	unsigned int crtc_mask = 0;
1806 
1807 	 /*
1808 	  * Legacy cursor ioctls are completely unsynced, and userspace
1809 	  * relies on that (by doing tons of cursor updates).
1810 	  */
1811 	if (state->legacy_cursor_update)
1812 		return;
1813 
1814 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1815 		if (!new_crtc_state->active)
1816 			continue;
1817 
1818 		ret = drm_crtc_vblank_get(crtc);
1819 		if (ret != 0)
1820 			continue;
1821 
1822 		crtc_mask |= drm_crtc_mask(crtc);
1823 		state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
1824 	}
1825 
1826 	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
1827 		if (!(crtc_mask & drm_crtc_mask(crtc)))
1828 			continue;
1829 
1830 		ret = wait_event_timeout(dev->vblank[i].queue,
1831 					 state->crtcs[i].last_vblank_count !=
1832 						drm_crtc_vblank_count(crtc),
1833 					 msecs_to_jiffies(100));
1834 
1835 		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
1836 		     crtc->base.id, crtc->name);
1837 
1838 		drm_crtc_vblank_put(crtc);
1839 	}
1840 }
1841 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1842 
1843 /**
1844  * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
1845  * @dev: DRM device
1846  * @state: atomic state object being committed
1847  *
1848  * Helper to, after atomic commit, wait for page flips on all affected
1849  * crtcs (ie. before cleaning up old framebuffers using
1850  * drm_atomic_helper_cleanup_planes()). Compared to
1851  * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
1852  * CRTCs, assuming that cursors-only updates are signalling their completion
1853  * immediately (or using a different path).
1854  *
1855  * This requires that drivers use the nonblocking commit tracking support
1856  * initialized using drm_atomic_helper_setup_commit().
1857  */
1858 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
1859 					  struct drm_atomic_state *state)
1860 {
1861 	struct drm_crtc *crtc;
1862 	int i;
1863 
1864 	for (i = 0; i < dev->mode_config.num_crtc; i++) {
1865 		struct drm_crtc_commit *commit = state->crtcs[i].commit;
1866 		int ret;
1867 
1868 		crtc = state->crtcs[i].ptr;
1869 
1870 		if (!crtc || !commit)
1871 			continue;
1872 
1873 		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
1874 		if (ret == 0)
1875 			drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n",
1876 				crtc->base.id, crtc->name);
1877 	}
1878 
1879 	if (state->fake_commit)
1880 		complete_all(&state->fake_commit->flip_done);
1881 }
1882 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
1883 
1884 /**
1885  * drm_atomic_helper_commit_tail - commit atomic update to hardware
1886  * @state: atomic state object being committed
1887  *
1888  * This is the default implementation for the
1889  * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1890  * that do not support runtime_pm or do not need the CRTC to be
1891  * enabled to perform a commit. Otherwise, see
1892  * drm_atomic_helper_commit_tail_rpm().
1893  *
1894  * Note that the default ordering of how the various stages are called is to
1895  * match the legacy modeset helper library closest.
1896  */
1897 void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
1898 {
1899 	struct drm_device *dev = state->dev;
1900 
1901 	drm_atomic_helper_commit_modeset_disables(dev, state);
1902 
1903 	drm_atomic_helper_commit_planes(dev, state, 0);
1904 
1905 	drm_atomic_helper_commit_modeset_enables(dev, state);
1906 
1907 	drm_atomic_helper_fake_vblank(state);
1908 
1909 	drm_atomic_helper_commit_hw_done(state);
1910 
1911 	drm_atomic_helper_wait_for_vblanks(dev, state);
1912 
1913 	drm_atomic_helper_cleanup_planes(dev, state);
1914 }
1915 EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1916 
1917 /**
1918  * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1919  * @state: new modeset state to be committed
1920  *
1921  * This is an alternative implementation for the
1922  * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
1923  * that support runtime_pm or need the CRTC to be enabled to perform a
1924  * commit. Otherwise, one should use the default implementation
1925  * drm_atomic_helper_commit_tail().
1926  */
1927 void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state)
1928 {
1929 	struct drm_device *dev = state->dev;
1930 
1931 	drm_atomic_helper_commit_modeset_disables(dev, state);
1932 
1933 	drm_atomic_helper_commit_modeset_enables(dev, state);
1934 
1935 	drm_atomic_helper_commit_planes(dev, state,
1936 					DRM_PLANE_COMMIT_ACTIVE_ONLY);
1937 
1938 	drm_atomic_helper_fake_vblank(state);
1939 
1940 	drm_atomic_helper_commit_hw_done(state);
1941 
1942 	drm_atomic_helper_wait_for_vblanks(dev, state);
1943 
1944 	drm_atomic_helper_cleanup_planes(dev, state);
1945 }
1946 EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
1947 
1948 static void commit_tail(struct drm_atomic_state *state)
1949 {
1950 	struct drm_device *dev = state->dev;
1951 	const struct drm_mode_config_helper_funcs *funcs;
1952 	struct drm_crtc_state *new_crtc_state;
1953 	struct drm_crtc *crtc;
1954 	ktime_t start;
1955 	s64 commit_time_ms;
1956 	unsigned int i, new_self_refresh_mask = 0;
1957 
1958 	funcs = dev->mode_config.helper_private;
1959 
1960 	/*
1961 	 * We're measuring the _entire_ commit, so the time will vary depending
1962 	 * on how many fences and objects are involved. For the purposes of self
1963 	 * refresh, this is desirable since it'll give us an idea of how
1964 	 * congested things are. This will inform our decision on how often we
1965 	 * should enter self refresh after idle.
1966 	 *
1967 	 * These times will be averaged out in the self refresh helpers to avoid
1968 	 * overreacting over one outlier frame
1969 	 */
1970 	start = ktime_get();
1971 
1972 	drm_atomic_helper_wait_for_fences(dev, state, false);
1973 
1974 	drm_atomic_helper_wait_for_dependencies(state);
1975 
1976 	/*
1977 	 * We cannot safely access new_crtc_state after
1978 	 * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
1979 	 * self-refresh active beforehand:
1980 	 */
1981 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
1982 		if (new_crtc_state->self_refresh_active)
1983 			new_self_refresh_mask |= BIT(i);
1984 
1985 	if (funcs && funcs->atomic_commit_tail)
1986 		funcs->atomic_commit_tail(state);
1987 	else
1988 		drm_atomic_helper_commit_tail(state);
1989 
1990 	commit_time_ms = ktime_ms_delta(ktime_get(), start);
1991 	if (commit_time_ms > 0)
1992 		drm_self_refresh_helper_update_avg_times(state,
1993 						 (unsigned long)commit_time_ms,
1994 						 new_self_refresh_mask);
1995 
1996 	drm_atomic_helper_commit_cleanup_done(state);
1997 
1998 	drm_atomic_state_put(state);
1999 }
2000 
2001 static void commit_work(struct work_struct *work)
2002 {
2003 	struct drm_atomic_state *state = container_of(work,
2004 						      struct drm_atomic_state,
2005 						      commit_work);
2006 	commit_tail(state);
2007 }
2008 
2009 /**
2010  * drm_atomic_helper_async_check - check if state can be committed asynchronously
2011  * @dev: DRM device
2012  * @state: the driver state object
2013  *
2014  * This helper will check if it is possible to commit the state asynchronously.
2015  * Async commits are not supposed to swap the states like normal sync commits
2016  * but just do in-place changes on the current state.
2017  *
2018  * It will return 0 if the commit can happen in an asynchronous fashion or error
2019  * if not. Note that error just mean it can't be committed asynchronously, if it
2020  * fails the commit should be treated like a normal synchronous commit.
2021  */
2022 int drm_atomic_helper_async_check(struct drm_device *dev,
2023 				   struct drm_atomic_state *state)
2024 {
2025 	struct drm_crtc *crtc;
2026 	struct drm_crtc_state *crtc_state;
2027 	struct drm_plane *plane = NULL;
2028 	struct drm_plane_state *old_plane_state = NULL;
2029 	struct drm_plane_state *new_plane_state = NULL;
2030 	const struct drm_plane_helper_funcs *funcs;
2031 	int i, ret, n_planes = 0;
2032 
2033 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2034 		if (drm_atomic_crtc_needs_modeset(crtc_state))
2035 			return -EINVAL;
2036 	}
2037 
2038 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
2039 		n_planes++;
2040 
2041 	/* FIXME: we support only single plane updates for now */
2042 	if (n_planes != 1) {
2043 		drm_dbg_atomic(dev,
2044 			       "only single plane async updates are supported\n");
2045 		return -EINVAL;
2046 	}
2047 
2048 	if (!new_plane_state->crtc ||
2049 	    old_plane_state->crtc != new_plane_state->crtc) {
2050 		drm_dbg_atomic(dev,
2051 			       "[PLANE:%d:%s] async update cannot change CRTC\n",
2052 			       plane->base.id, plane->name);
2053 		return -EINVAL;
2054 	}
2055 
2056 	funcs = plane->helper_private;
2057 	if (!funcs->atomic_async_update) {
2058 		drm_dbg_atomic(dev,
2059 			       "[PLANE:%d:%s] driver does not support async updates\n",
2060 			       plane->base.id, plane->name);
2061 		return -EINVAL;
2062 	}
2063 
2064 	if (new_plane_state->fence) {
2065 		drm_dbg_atomic(dev,
2066 			       "[PLANE:%d:%s] missing fence for async update\n",
2067 			       plane->base.id, plane->name);
2068 		return -EINVAL;
2069 	}
2070 
2071 	/*
2072 	 * Don't do an async update if there is an outstanding commit modifying
2073 	 * the plane.  This prevents our async update's changes from getting
2074 	 * overridden by a previous synchronous update's state.
2075 	 */
2076 	if (old_plane_state->commit &&
2077 	    !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
2078 		drm_dbg_atomic(dev,
2079 			       "[PLANE:%d:%s] inflight previous commit preventing async commit\n",
2080 			       plane->base.id, plane->name);
2081 		return -EBUSY;
2082 	}
2083 
2084 	ret = funcs->atomic_async_check(plane, state, false);
2085 	if (ret != 0)
2086 		drm_dbg_atomic(dev,
2087 			       "[PLANE:%d:%s] driver async check failed\n",
2088 			       plane->base.id, plane->name);
2089 	return ret;
2090 }
2091 EXPORT_SYMBOL(drm_atomic_helper_async_check);
2092 
2093 /**
2094  * drm_atomic_helper_async_commit - commit state asynchronously
2095  * @dev: DRM device
2096  * @state: the driver state object
2097  *
2098  * This function commits a state asynchronously, i.e., not vblank
2099  * synchronized. It should be used on a state only when
2100  * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
2101  * the states like normal sync commits, but just do in-place changes on the
2102  * current state.
2103  *
2104  * TODO: Implement full swap instead of doing in-place changes.
2105  */
2106 void drm_atomic_helper_async_commit(struct drm_device *dev,
2107 				    struct drm_atomic_state *state)
2108 {
2109 	struct drm_plane *plane;
2110 	struct drm_plane_state *plane_state;
2111 	const struct drm_plane_helper_funcs *funcs;
2112 	int i;
2113 
2114 	for_each_new_plane_in_state(state, plane, plane_state, i) {
2115 		struct drm_framebuffer *new_fb = plane_state->fb;
2116 		struct drm_framebuffer *old_fb = plane->state->fb;
2117 
2118 		funcs = plane->helper_private;
2119 		funcs->atomic_async_update(plane, state);
2120 
2121 		/*
2122 		 * ->atomic_async_update() is supposed to update the
2123 		 * plane->state in-place, make sure at least common
2124 		 * properties have been properly updated.
2125 		 */
2126 		WARN_ON_ONCE(plane->state->fb != new_fb);
2127 		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
2128 		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
2129 		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
2130 		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
2131 
2132 		/*
2133 		 * Make sure the FBs have been swapped so that cleanups in the
2134 		 * new_state performs a cleanup in the old FB.
2135 		 */
2136 		WARN_ON_ONCE(plane_state->fb != old_fb);
2137 	}
2138 }
2139 EXPORT_SYMBOL(drm_atomic_helper_async_commit);
2140 
2141 /**
2142  * drm_atomic_helper_commit - commit validated state object
2143  * @dev: DRM device
2144  * @state: the driver state object
2145  * @nonblock: whether nonblocking behavior is requested.
2146  *
2147  * This function commits a with drm_atomic_helper_check() pre-validated state
2148  * object. This can still fail when e.g. the framebuffer reservation fails. This
2149  * function implements nonblocking commits, using
2150  * drm_atomic_helper_setup_commit() and related functions.
2151  *
2152  * Committing the actual hardware state is done through the
2153  * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
2154  * implementation drm_atomic_helper_commit_tail().
2155  *
2156  * RETURNS:
2157  * Zero for success or -errno.
2158  */
2159 int drm_atomic_helper_commit(struct drm_device *dev,
2160 			     struct drm_atomic_state *state,
2161 			     bool nonblock)
2162 {
2163 	int ret;
2164 
2165 	if (state->async_update) {
2166 		ret = drm_atomic_helper_prepare_planes(dev, state);
2167 		if (ret)
2168 			return ret;
2169 
2170 		drm_atomic_helper_async_commit(dev, state);
2171 		drm_atomic_helper_unprepare_planes(dev, state);
2172 
2173 		return 0;
2174 	}
2175 
2176 	ret = drm_atomic_helper_setup_commit(state, nonblock);
2177 	if (ret)
2178 		return ret;
2179 
2180 	INIT_WORK(&state->commit_work, commit_work);
2181 
2182 	ret = drm_atomic_helper_prepare_planes(dev, state);
2183 	if (ret)
2184 		return ret;
2185 
2186 	if (!nonblock) {
2187 		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
2188 		if (ret)
2189 			goto err;
2190 	}
2191 
2192 	/*
2193 	 * This is the point of no return - everything below never fails except
2194 	 * when the hw goes bonghits. Which means we can commit the new state on
2195 	 * the software side now.
2196 	 */
2197 
2198 	ret = drm_atomic_helper_swap_state(state, true);
2199 	if (ret)
2200 		goto err;
2201 
2202 	/*
2203 	 * Everything below can be run asynchronously without the need to grab
2204 	 * any modeset locks at all under one condition: It must be guaranteed
2205 	 * that the asynchronous work has either been cancelled (if the driver
2206 	 * supports it, which at least requires that the framebuffers get
2207 	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
2208 	 * before the new state gets committed on the software side with
2209 	 * drm_atomic_helper_swap_state().
2210 	 *
2211 	 * This scheme allows new atomic state updates to be prepared and
2212 	 * checked in parallel to the asynchronous completion of the previous
2213 	 * update. Which is important since compositors need to figure out the
2214 	 * composition of the next frame right after having submitted the
2215 	 * current layout.
2216 	 *
2217 	 * NOTE: Commit work has multiple phases, first hardware commit, then
2218 	 * cleanup. We want them to overlap, hence need system_unbound_wq to
2219 	 * make sure work items don't artificially stall on each another.
2220 	 */
2221 
2222 	drm_atomic_state_get(state);
2223 	if (nonblock)
2224 		queue_work(system_unbound_wq, &state->commit_work);
2225 	else
2226 		commit_tail(state);
2227 
2228 	return 0;
2229 
2230 err:
2231 	drm_atomic_helper_unprepare_planes(dev, state);
2232 	return ret;
2233 }
2234 EXPORT_SYMBOL(drm_atomic_helper_commit);
2235 
2236 /**
2237  * DOC: implementing nonblocking commit
2238  *
2239  * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
2240  * different operations against each another. Locks, especially struct
2241  * &drm_modeset_lock, should not be held in worker threads or any other
2242  * asynchronous context used to commit the hardware state.
2243  *
2244  * drm_atomic_helper_commit() implements the recommended sequence for
2245  * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
2246  *
2247  * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
2248  * need to propagate out of memory/VRAM errors to userspace, it must be called
2249  * synchronously.
2250  *
2251  * 2. Synchronize with any outstanding nonblocking commit worker threads which
2252  * might be affected by the new state update. This is handled by
2253  * drm_atomic_helper_setup_commit().
2254  *
2255  * Asynchronous workers need to have sufficient parallelism to be able to run
2256  * different atomic commits on different CRTCs in parallel. The simplest way to
2257  * achieve this is by running them on the &system_unbound_wq work queue. Note
2258  * that drivers are not required to split up atomic commits and run an
2259  * individual commit in parallel - userspace is supposed to do that if it cares.
2260  * But it might be beneficial to do that for modesets, since those necessarily
2261  * must be done as one global operation, and enabling or disabling a CRTC can
2262  * take a long time. But even that is not required.
2263  *
2264  * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
2265  * against all CRTCs therein. Therefore for atomic state updates which only flip
2266  * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
2267  * in its atomic check code: This would prevent committing of atomic updates to
2268  * multiple CRTCs in parallel. In general, adding additional state structures
2269  * should be avoided as much as possible, because this reduces parallelism in
2270  * (nonblocking) commits, both due to locking and due to commit sequencing
2271  * requirements.
2272  *
2273  * 3. The software state is updated synchronously with
2274  * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
2275  * locks means concurrent callers never see inconsistent state. Note that commit
2276  * workers do not hold any locks; their access is only coordinated through
2277  * ordering. If workers would access state only through the pointers in the
2278  * free-standing state objects (currently not the case for any driver) then even
2279  * multiple pending commits could be in-flight at the same time.
2280  *
2281  * 4. Schedule a work item to do all subsequent steps, using the split-out
2282  * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
2283  * then cleaning up the framebuffers after the old framebuffer is no longer
2284  * being displayed. The scheduled work should synchronize against other workers
2285  * using the &drm_crtc_commit infrastructure as needed. See
2286  * drm_atomic_helper_setup_commit() for more details.
2287  */
2288 
2289 static int stall_checks(struct drm_crtc *crtc, bool nonblock)
2290 {
2291 	struct drm_crtc_commit *commit, *stall_commit = NULL;
2292 	bool completed = true;
2293 	int i;
2294 	long ret = 0;
2295 
2296 	spin_lock(&crtc->commit_lock);
2297 	i = 0;
2298 	list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
2299 		if (i == 0) {
2300 			completed = try_wait_for_completion(&commit->flip_done);
2301 			/*
2302 			 * Userspace is not allowed to get ahead of the previous
2303 			 * commit with nonblocking ones.
2304 			 */
2305 			if (!completed && nonblock) {
2306 				spin_unlock(&crtc->commit_lock);
2307 				drm_dbg_atomic(crtc->dev,
2308 					       "[CRTC:%d:%s] busy with a previous commit\n",
2309 					       crtc->base.id, crtc->name);
2310 
2311 				return -EBUSY;
2312 			}
2313 		} else if (i == 1) {
2314 			stall_commit = drm_crtc_commit_get(commit);
2315 			break;
2316 		}
2317 
2318 		i++;
2319 	}
2320 	spin_unlock(&crtc->commit_lock);
2321 
2322 	if (!stall_commit)
2323 		return 0;
2324 
2325 	/* We don't want to let commits get ahead of cleanup work too much,
2326 	 * stalling on 2nd previous commit means triple-buffer won't ever stall.
2327 	 */
2328 	ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
2329 							10*HZ);
2330 	if (ret == 0)
2331 		drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n",
2332 			crtc->base.id, crtc->name);
2333 
2334 	drm_crtc_commit_put(stall_commit);
2335 
2336 	return ret < 0 ? ret : 0;
2337 }
2338 
2339 static void release_crtc_commit(struct completion *completion)
2340 {
2341 	struct drm_crtc_commit *commit = container_of(completion,
2342 						      typeof(*commit),
2343 						      flip_done);
2344 
2345 	drm_crtc_commit_put(commit);
2346 }
2347 
2348 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
2349 {
2350 	init_completion(&commit->flip_done);
2351 	init_completion(&commit->hw_done);
2352 	init_completion(&commit->cleanup_done);
2353 	INIT_LIST_HEAD(&commit->commit_entry);
2354 	kref_init(&commit->ref);
2355 	commit->crtc = crtc;
2356 }
2357 
2358 static struct drm_crtc_commit *
2359 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
2360 {
2361 	if (crtc) {
2362 		struct drm_crtc_state *new_crtc_state;
2363 
2364 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
2365 
2366 		return new_crtc_state->commit;
2367 	}
2368 
2369 	if (!state->fake_commit) {
2370 		state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
2371 		if (!state->fake_commit)
2372 			return NULL;
2373 
2374 		init_commit(state->fake_commit, NULL);
2375 	}
2376 
2377 	return state->fake_commit;
2378 }
2379 
2380 /**
2381  * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
2382  * @state: new modeset state to be committed
2383  * @nonblock: whether nonblocking behavior is requested.
2384  *
2385  * This function prepares @state to be used by the atomic helper's support for
2386  * nonblocking commits. Drivers using the nonblocking commit infrastructure
2387  * should always call this function from their
2388  * &drm_mode_config_funcs.atomic_commit hook.
2389  *
2390  * Drivers that need to extend the commit setup to private objects can use the
2391  * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
2392  *
2393  * To be able to use this support drivers need to use a few more helper
2394  * functions. drm_atomic_helper_wait_for_dependencies() must be called before
2395  * actually committing the hardware state, and for nonblocking commits this call
2396  * must be placed in the async worker. See also drm_atomic_helper_swap_state()
2397  * and its stall parameter, for when a driver's commit hooks look at the
2398  * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
2399  *
2400  * Completion of the hardware commit step must be signalled using
2401  * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
2402  * to read or change any permanent software or hardware modeset state. The only
2403  * exception is state protected by other means than &drm_modeset_lock locks.
2404  * Only the free standing @state with pointers to the old state structures can
2405  * be inspected, e.g. to clean up old buffers using
2406  * drm_atomic_helper_cleanup_planes().
2407  *
2408  * At the very end, before cleaning up @state drivers must call
2409  * drm_atomic_helper_commit_cleanup_done().
2410  *
2411  * This is all implemented by in drm_atomic_helper_commit(), giving drivers a
2412  * complete and easy-to-use default implementation of the atomic_commit() hook.
2413  *
2414  * The tracking of asynchronously executed and still pending commits is done
2415  * using the core structure &drm_crtc_commit.
2416  *
2417  * By default there's no need to clean up resources allocated by this function
2418  * explicitly: drm_atomic_state_default_clear() will take care of that
2419  * automatically.
2420  *
2421  * Returns:
2422  * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
2423  * -ENOMEM on allocation failures and -EINTR when a signal is pending.
2424  */
2425 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
2426 				   bool nonblock)
2427 {
2428 	struct drm_crtc *crtc;
2429 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2430 	struct drm_connector *conn;
2431 	struct drm_connector_state *old_conn_state, *new_conn_state;
2432 	struct drm_plane *plane;
2433 	struct drm_plane_state *old_plane_state, *new_plane_state;
2434 	struct drm_crtc_commit *commit;
2435 	const struct drm_mode_config_helper_funcs *funcs;
2436 	int i, ret;
2437 
2438 	funcs = state->dev->mode_config.helper_private;
2439 
2440 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2441 		commit = kzalloc(sizeof(*commit), GFP_KERNEL);
2442 		if (!commit)
2443 			return -ENOMEM;
2444 
2445 		init_commit(commit, crtc);
2446 
2447 		new_crtc_state->commit = commit;
2448 
2449 		ret = stall_checks(crtc, nonblock);
2450 		if (ret)
2451 			return ret;
2452 
2453 		/*
2454 		 * Drivers only send out events when at least either current or
2455 		 * new CRTC state is active. Complete right away if everything
2456 		 * stays off.
2457 		 */
2458 		if (!old_crtc_state->active && !new_crtc_state->active) {
2459 			complete_all(&commit->flip_done);
2460 			continue;
2461 		}
2462 
2463 		/* Legacy cursor updates are fully unsynced. */
2464 		if (state->legacy_cursor_update) {
2465 			complete_all(&commit->flip_done);
2466 			continue;
2467 		}
2468 
2469 		if (!new_crtc_state->event) {
2470 			commit->event = kzalloc(sizeof(*commit->event),
2471 						GFP_KERNEL);
2472 			if (!commit->event)
2473 				return -ENOMEM;
2474 
2475 			new_crtc_state->event = commit->event;
2476 		}
2477 
2478 		new_crtc_state->event->base.completion = &commit->flip_done;
2479 		new_crtc_state->event->base.completion_release = release_crtc_commit;
2480 		drm_crtc_commit_get(commit);
2481 
2482 		commit->abort_completion = true;
2483 
2484 		state->crtcs[i].commit = commit;
2485 		drm_crtc_commit_get(commit);
2486 	}
2487 
2488 	for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
2489 		/*
2490 		 * Userspace is not allowed to get ahead of the previous
2491 		 * commit with nonblocking ones.
2492 		 */
2493 		if (nonblock && old_conn_state->commit &&
2494 		    !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
2495 			drm_dbg_atomic(conn->dev,
2496 				       "[CONNECTOR:%d:%s] busy with a previous commit\n",
2497 				       conn->base.id, conn->name);
2498 
2499 			return -EBUSY;
2500 		}
2501 
2502 		/* Always track connectors explicitly for e.g. link retraining. */
2503 		commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
2504 		if (!commit)
2505 			return -ENOMEM;
2506 
2507 		new_conn_state->commit = drm_crtc_commit_get(commit);
2508 	}
2509 
2510 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2511 		/*
2512 		 * Userspace is not allowed to get ahead of the previous
2513 		 * commit with nonblocking ones.
2514 		 */
2515 		if (nonblock && old_plane_state->commit &&
2516 		    !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
2517 			drm_dbg_atomic(plane->dev,
2518 				       "[PLANE:%d:%s] busy with a previous commit\n",
2519 				       plane->base.id, plane->name);
2520 
2521 			return -EBUSY;
2522 		}
2523 
2524 		/* Always track planes explicitly for async pageflip support. */
2525 		commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
2526 		if (!commit)
2527 			return -ENOMEM;
2528 
2529 		new_plane_state->commit = drm_crtc_commit_get(commit);
2530 	}
2531 
2532 	if (funcs && funcs->atomic_commit_setup)
2533 		return funcs->atomic_commit_setup(state);
2534 
2535 	return 0;
2536 }
2537 EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
2538 
2539 /**
2540  * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
2541  * @state: atomic state object being committed
2542  *
2543  * This function waits for all preceding commits that touch the same CRTC as
2544  * @state to both be committed to the hardware (as signalled by
2545  * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
2546  * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2547  *
2548  * This is part of the atomic helper support for nonblocking commits, see
2549  * drm_atomic_helper_setup_commit() for an overview.
2550  */
2551 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
2552 {
2553 	struct drm_crtc *crtc;
2554 	struct drm_crtc_state *old_crtc_state;
2555 	struct drm_plane *plane;
2556 	struct drm_plane_state *old_plane_state;
2557 	struct drm_connector *conn;
2558 	struct drm_connector_state *old_conn_state;
2559 	int i;
2560 	long ret;
2561 
2562 	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2563 		ret = drm_crtc_commit_wait(old_crtc_state->commit);
2564 		if (ret)
2565 			drm_err(crtc->dev,
2566 				"[CRTC:%d:%s] commit wait timed out\n",
2567 				crtc->base.id, crtc->name);
2568 	}
2569 
2570 	for_each_old_connector_in_state(state, conn, old_conn_state, i) {
2571 		ret = drm_crtc_commit_wait(old_conn_state->commit);
2572 		if (ret)
2573 			drm_err(conn->dev,
2574 				"[CONNECTOR:%d:%s] commit wait timed out\n",
2575 				conn->base.id, conn->name);
2576 	}
2577 
2578 	for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2579 		ret = drm_crtc_commit_wait(old_plane_state->commit);
2580 		if (ret)
2581 			drm_err(plane->dev,
2582 				"[PLANE:%d:%s] commit wait timed out\n",
2583 				plane->base.id, plane->name);
2584 	}
2585 }
2586 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
2587 
2588 /**
2589  * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
2590  * @state: atomic state object being committed
2591  *
2592  * This function walks all CRTCs and fakes VBLANK events on those with
2593  * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
2594  * The primary use of this function is writeback connectors working in oneshot
2595  * mode and faking VBLANK events. In this case they only fake the VBLANK event
2596  * when a job is queued, and any change to the pipeline that does not touch the
2597  * connector is leading to timeouts when calling
2598  * drm_atomic_helper_wait_for_vblanks() or
2599  * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
2600  * connectors, this function can also fake VBLANK events for CRTCs without
2601  * VBLANK interrupt.
2602  *
2603  * This is part of the atomic helper support for nonblocking commits, see
2604  * drm_atomic_helper_setup_commit() for an overview.
2605  */
2606 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state)
2607 {
2608 	struct drm_crtc_state *new_crtc_state;
2609 	struct drm_crtc *crtc;
2610 	int i;
2611 
2612 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
2613 		unsigned long flags;
2614 
2615 		if (!new_crtc_state->no_vblank)
2616 			continue;
2617 
2618 		spin_lock_irqsave(&state->dev->event_lock, flags);
2619 		if (new_crtc_state->event) {
2620 			drm_crtc_send_vblank_event(crtc,
2621 						   new_crtc_state->event);
2622 			new_crtc_state->event = NULL;
2623 		}
2624 		spin_unlock_irqrestore(&state->dev->event_lock, flags);
2625 	}
2626 }
2627 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
2628 
2629 /**
2630  * drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
2631  * @state: atomic state object being committed
2632  *
2633  * This function is used to signal completion of the hardware commit step. After
2634  * this step the driver is not allowed to read or change any permanent software
2635  * or hardware modeset state. The only exception is state protected by other
2636  * means than &drm_modeset_lock locks.
2637  *
2638  * Drivers should try to postpone any expensive or delayed cleanup work after
2639  * this function is called.
2640  *
2641  * This is part of the atomic helper support for nonblocking commits, see
2642  * drm_atomic_helper_setup_commit() for an overview.
2643  */
2644 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
2645 {
2646 	struct drm_crtc *crtc;
2647 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2648 	struct drm_crtc_commit *commit;
2649 	int i;
2650 
2651 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2652 		commit = new_crtc_state->commit;
2653 		if (!commit)
2654 			continue;
2655 
2656 		/*
2657 		 * copy new_crtc_state->commit to old_crtc_state->commit,
2658 		 * it's unsafe to touch new_crtc_state after hw_done,
2659 		 * but we still need to do so in cleanup_done().
2660 		 */
2661 		if (old_crtc_state->commit)
2662 			drm_crtc_commit_put(old_crtc_state->commit);
2663 
2664 		old_crtc_state->commit = drm_crtc_commit_get(commit);
2665 
2666 		/* backend must have consumed any event by now */
2667 		WARN_ON(new_crtc_state->event);
2668 		complete_all(&commit->hw_done);
2669 	}
2670 
2671 	if (state->fake_commit) {
2672 		complete_all(&state->fake_commit->hw_done);
2673 		complete_all(&state->fake_commit->flip_done);
2674 	}
2675 }
2676 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
2677 
2678 /**
2679  * drm_atomic_helper_commit_cleanup_done - signal completion of commit
2680  * @state: atomic state object being committed
2681  *
2682  * This signals completion of the atomic update @state, including any
2683  * cleanup work. If used, it must be called right before calling
2684  * drm_atomic_state_put().
2685  *
2686  * This is part of the atomic helper support for nonblocking commits, see
2687  * drm_atomic_helper_setup_commit() for an overview.
2688  */
2689 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
2690 {
2691 	struct drm_crtc *crtc;
2692 	struct drm_crtc_state *old_crtc_state;
2693 	struct drm_crtc_commit *commit;
2694 	int i;
2695 
2696 	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
2697 		commit = old_crtc_state->commit;
2698 		if (WARN_ON(!commit))
2699 			continue;
2700 
2701 		complete_all(&commit->cleanup_done);
2702 		WARN_ON(!try_wait_for_completion(&commit->hw_done));
2703 
2704 		spin_lock(&crtc->commit_lock);
2705 		list_del(&commit->commit_entry);
2706 		spin_unlock(&crtc->commit_lock);
2707 	}
2708 
2709 	if (state->fake_commit) {
2710 		complete_all(&state->fake_commit->cleanup_done);
2711 		WARN_ON(!try_wait_for_completion(&state->fake_commit->hw_done));
2712 	}
2713 }
2714 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
2715 
2716 /**
2717  * drm_atomic_helper_prepare_planes - prepare plane resources before commit
2718  * @dev: DRM device
2719  * @state: atomic state object with new state structures
2720  *
2721  * This function prepares plane state, specifically framebuffers, for the new
2722  * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
2723  * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
2724  * any already successfully prepared framebuffer.
2725  *
2726  * Returns:
2727  * 0 on success, negative error code on failure.
2728  */
2729 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
2730 				     struct drm_atomic_state *state)
2731 {
2732 	struct drm_connector *connector;
2733 	struct drm_connector_state *new_conn_state;
2734 	struct drm_plane *plane;
2735 	struct drm_plane_state *new_plane_state;
2736 	int ret, i, j;
2737 
2738 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
2739 		if (!new_conn_state->writeback_job)
2740 			continue;
2741 
2742 		ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
2743 		if (ret < 0)
2744 			return ret;
2745 	}
2746 
2747 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2748 		const struct drm_plane_helper_funcs *funcs;
2749 
2750 		funcs = plane->helper_private;
2751 
2752 		if (funcs->prepare_fb) {
2753 			ret = funcs->prepare_fb(plane, new_plane_state);
2754 			if (ret)
2755 				goto fail_prepare_fb;
2756 		} else {
2757 			WARN_ON_ONCE(funcs->cleanup_fb);
2758 
2759 			if (!drm_core_check_feature(dev, DRIVER_GEM))
2760 				continue;
2761 
2762 			ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
2763 			if (ret)
2764 				goto fail_prepare_fb;
2765 		}
2766 	}
2767 
2768 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2769 		const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2770 
2771 		if (funcs->begin_fb_access) {
2772 			ret = funcs->begin_fb_access(plane, new_plane_state);
2773 			if (ret)
2774 				goto fail_begin_fb_access;
2775 		}
2776 	}
2777 
2778 	return 0;
2779 
2780 fail_begin_fb_access:
2781 	for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2782 		const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2783 
2784 		if (j >= i)
2785 			continue;
2786 
2787 		if (funcs->end_fb_access)
2788 			funcs->end_fb_access(plane, new_plane_state);
2789 	}
2790 	i = j; /* set i to upper limit to cleanup all planes */
2791 fail_prepare_fb:
2792 	for_each_new_plane_in_state(state, plane, new_plane_state, j) {
2793 		const struct drm_plane_helper_funcs *funcs;
2794 
2795 		if (j >= i)
2796 			continue;
2797 
2798 		funcs = plane->helper_private;
2799 
2800 		if (funcs->cleanup_fb)
2801 			funcs->cleanup_fb(plane, new_plane_state);
2802 	}
2803 
2804 	return ret;
2805 }
2806 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
2807 
2808 /**
2809  * drm_atomic_helper_unprepare_planes - release plane resources on aborts
2810  * @dev: DRM device
2811  * @state: atomic state object with old state structures
2812  *
2813  * This function cleans up plane state, specifically framebuffers, from the
2814  * atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
2815  * when aborting an atomic commit. For cleaning up after a successful commit
2816  * use drm_atomic_helper_cleanup_planes().
2817  */
2818 void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
2819 					struct drm_atomic_state *state)
2820 {
2821 	struct drm_plane *plane;
2822 	struct drm_plane_state *new_plane_state;
2823 	int i;
2824 
2825 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2826 		const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2827 
2828 		if (funcs->end_fb_access)
2829 			funcs->end_fb_access(plane, new_plane_state);
2830 	}
2831 
2832 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
2833 		const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2834 
2835 		if (funcs->cleanup_fb)
2836 			funcs->cleanup_fb(plane, new_plane_state);
2837 	}
2838 }
2839 EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
2840 
2841 static bool plane_crtc_active(const struct drm_plane_state *state)
2842 {
2843 	return state->crtc && state->crtc->state->active;
2844 }
2845 
2846 /**
2847  * drm_atomic_helper_commit_planes - commit plane state
2848  * @dev: DRM device
2849  * @state: atomic state object being committed
2850  * @flags: flags for committing plane state
2851  *
2852  * This function commits the new plane state using the plane and atomic helper
2853  * functions for planes and CRTCs. It assumes that the atomic state has already
2854  * been pushed into the relevant object state pointers, since this step can no
2855  * longer fail.
2856  *
2857  * It still requires the global state object @state to know which planes and
2858  * crtcs need to be updated though.
2859  *
2860  * Note that this function does all plane updates across all CRTCs in one step.
2861  * If the hardware can't support this approach look at
2862  * drm_atomic_helper_commit_planes_on_crtc() instead.
2863  *
2864  * Plane parameters can be updated by applications while the associated CRTC is
2865  * disabled. The DRM/KMS core will store the parameters in the plane state,
2866  * which will be available to the driver when the CRTC is turned on. As a result
2867  * most drivers don't need to be immediately notified of plane updates for a
2868  * disabled CRTC.
2869  *
2870  * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
2871  * @flags in order not to receive plane update notifications related to a
2872  * disabled CRTC. This avoids the need to manually ignore plane updates in
2873  * driver code when the driver and/or hardware can't or just don't need to deal
2874  * with updates on disabled CRTCs, for example when supporting runtime PM.
2875  *
2876  * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
2877  * display controllers require to disable a CRTC's planes when the CRTC is
2878  * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
2879  * call for a plane if the CRTC of the old plane state needs a modesetting
2880  * operation. Of course, the drivers need to disable the planes in their CRTC
2881  * disable callbacks since no one else would do that.
2882  *
2883  * The drm_atomic_helper_commit() default implementation doesn't set the
2884  * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
2885  * This should not be copied blindly by drivers.
2886  */
2887 void drm_atomic_helper_commit_planes(struct drm_device *dev,
2888 				     struct drm_atomic_state *state,
2889 				     uint32_t flags)
2890 {
2891 	struct drm_crtc *crtc;
2892 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
2893 	struct drm_plane *plane;
2894 	struct drm_plane_state *old_plane_state, *new_plane_state;
2895 	int i;
2896 	bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
2897 	bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
2898 
2899 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2900 		const struct drm_crtc_helper_funcs *funcs;
2901 
2902 		funcs = crtc->helper_private;
2903 
2904 		if (!funcs || !funcs->atomic_begin)
2905 			continue;
2906 
2907 		if (active_only && !new_crtc_state->active)
2908 			continue;
2909 
2910 		funcs->atomic_begin(crtc, state);
2911 	}
2912 
2913 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
2914 		const struct drm_plane_helper_funcs *funcs;
2915 		bool disabling;
2916 
2917 		funcs = plane->helper_private;
2918 
2919 		if (!funcs)
2920 			continue;
2921 
2922 		disabling = drm_atomic_plane_disabling(old_plane_state,
2923 						       new_plane_state);
2924 
2925 		if (active_only) {
2926 			/*
2927 			 * Skip planes related to inactive CRTCs. If the plane
2928 			 * is enabled use the state of the current CRTC. If the
2929 			 * plane is being disabled use the state of the old
2930 			 * CRTC to avoid skipping planes being disabled on an
2931 			 * active CRTC.
2932 			 */
2933 			if (!disabling && !plane_crtc_active(new_plane_state))
2934 				continue;
2935 			if (disabling && !plane_crtc_active(old_plane_state))
2936 				continue;
2937 		}
2938 
2939 		/*
2940 		 * Special-case disabling the plane if drivers support it.
2941 		 */
2942 		if (disabling && funcs->atomic_disable) {
2943 			struct drm_crtc_state *crtc_state;
2944 
2945 			crtc_state = old_plane_state->crtc->state;
2946 
2947 			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2948 			    no_disable)
2949 				continue;
2950 
2951 			funcs->atomic_disable(plane, state);
2952 		} else if (new_plane_state->crtc || disabling) {
2953 			funcs->atomic_update(plane, state);
2954 
2955 			if (!disabling && funcs->atomic_enable) {
2956 				if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
2957 					funcs->atomic_enable(plane, state);
2958 			}
2959 		}
2960 	}
2961 
2962 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2963 		const struct drm_crtc_helper_funcs *funcs;
2964 
2965 		funcs = crtc->helper_private;
2966 
2967 		if (!funcs || !funcs->atomic_flush)
2968 			continue;
2969 
2970 		if (active_only && !new_crtc_state->active)
2971 			continue;
2972 
2973 		funcs->atomic_flush(crtc, state);
2974 	}
2975 
2976 	/*
2977 	 * Signal end of framebuffer access here before hw_done. After hw_done,
2978 	 * a later commit might have already released the plane state.
2979 	 */
2980 	for_each_old_plane_in_state(state, plane, old_plane_state, i) {
2981 		const struct drm_plane_helper_funcs *funcs = plane->helper_private;
2982 
2983 		if (funcs->end_fb_access)
2984 			funcs->end_fb_access(plane, old_plane_state);
2985 	}
2986 }
2987 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
2988 
2989 /**
2990  * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
2991  * @old_crtc_state: atomic state object with the old CRTC state
2992  *
2993  * This function commits the new plane state using the plane and atomic helper
2994  * functions for planes on the specific CRTC. It assumes that the atomic state
2995  * has already been pushed into the relevant object state pointers, since this
2996  * step can no longer fail.
2997  *
2998  * This function is useful when plane updates should be done CRTC-by-CRTC
2999  * instead of one global step like drm_atomic_helper_commit_planes() does.
3000  *
3001  * This function can only be savely used when planes are not allowed to move
3002  * between different CRTCs because this function doesn't handle inter-CRTC
3003  * dependencies. Callers need to ensure that either no such dependencies exist,
3004  * resolve them through ordering of commit calls or through some other means.
3005  */
3006 void
3007 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
3008 {
3009 	const struct drm_crtc_helper_funcs *crtc_funcs;
3010 	struct drm_crtc *crtc = old_crtc_state->crtc;
3011 	struct drm_atomic_state *old_state = old_crtc_state->state;
3012 	struct drm_crtc_state *new_crtc_state =
3013 		drm_atomic_get_new_crtc_state(old_state, crtc);
3014 	struct drm_plane *plane;
3015 	unsigned int plane_mask;
3016 
3017 	plane_mask = old_crtc_state->plane_mask;
3018 	plane_mask |= new_crtc_state->plane_mask;
3019 
3020 	crtc_funcs = crtc->helper_private;
3021 	if (crtc_funcs && crtc_funcs->atomic_begin)
3022 		crtc_funcs->atomic_begin(crtc, old_state);
3023 
3024 	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
3025 		struct drm_plane_state *old_plane_state =
3026 			drm_atomic_get_old_plane_state(old_state, plane);
3027 		struct drm_plane_state *new_plane_state =
3028 			drm_atomic_get_new_plane_state(old_state, plane);
3029 		const struct drm_plane_helper_funcs *plane_funcs;
3030 		bool disabling;
3031 
3032 		plane_funcs = plane->helper_private;
3033 
3034 		if (!old_plane_state || !plane_funcs)
3035 			continue;
3036 
3037 		WARN_ON(new_plane_state->crtc &&
3038 			new_plane_state->crtc != crtc);
3039 
3040 		disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
3041 
3042 		if (disabling && plane_funcs->atomic_disable) {
3043 			plane_funcs->atomic_disable(plane, old_state);
3044 		} else if (new_plane_state->crtc || disabling) {
3045 			plane_funcs->atomic_update(plane, old_state);
3046 
3047 			if (!disabling && plane_funcs->atomic_enable) {
3048 				if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
3049 					plane_funcs->atomic_enable(plane, old_state);
3050 			}
3051 		}
3052 	}
3053 
3054 	if (crtc_funcs && crtc_funcs->atomic_flush)
3055 		crtc_funcs->atomic_flush(crtc, old_state);
3056 }
3057 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
3058 
3059 /**
3060  * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
3061  * @old_crtc_state: atomic state object with the old CRTC state
3062  * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
3063  *
3064  * Disables all planes associated with the given CRTC. This can be
3065  * used for instance in the CRTC helper atomic_disable callback to disable
3066  * all planes.
3067  *
3068  * If the atomic-parameter is set the function calls the CRTC's
3069  * atomic_begin hook before and atomic_flush hook after disabling the
3070  * planes.
3071  *
3072  * It is a bug to call this function without having implemented the
3073  * &drm_plane_helper_funcs.atomic_disable plane hook.
3074  */
3075 void
3076 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
3077 					 bool atomic)
3078 {
3079 	struct drm_crtc *crtc = old_crtc_state->crtc;
3080 	const struct drm_crtc_helper_funcs *crtc_funcs =
3081 		crtc->helper_private;
3082 	struct drm_plane *plane;
3083 
3084 	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
3085 		crtc_funcs->atomic_begin(crtc, NULL);
3086 
3087 	drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
3088 		const struct drm_plane_helper_funcs *plane_funcs =
3089 			plane->helper_private;
3090 
3091 		if (!plane_funcs)
3092 			continue;
3093 
3094 		WARN_ON(!plane_funcs->atomic_disable);
3095 		if (plane_funcs->atomic_disable)
3096 			plane_funcs->atomic_disable(plane, NULL);
3097 	}
3098 
3099 	if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
3100 		crtc_funcs->atomic_flush(crtc, NULL);
3101 }
3102 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
3103 
3104 /**
3105  * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
3106  * @dev: DRM device
3107  * @state: atomic state object being committed
3108  *
3109  * This function cleans up plane state, specifically framebuffers, from the old
3110  * configuration. Hence the old configuration must be perserved in @state to
3111  * be able to call this function.
3112  *
3113  * This function may not be called on the new state when the atomic update
3114  * fails at any point after calling drm_atomic_helper_prepare_planes(). Use
3115  * drm_atomic_helper_unprepare_planes() in this case.
3116  */
3117 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
3118 				      struct drm_atomic_state *state)
3119 {
3120 	struct drm_plane *plane;
3121 	struct drm_plane_state *old_plane_state;
3122 	int i;
3123 
3124 	for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3125 		const struct drm_plane_helper_funcs *funcs = plane->helper_private;
3126 
3127 		if (funcs->cleanup_fb)
3128 			funcs->cleanup_fb(plane, old_plane_state);
3129 	}
3130 }
3131 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
3132 
3133 /**
3134  * drm_atomic_helper_swap_state - store atomic state into current sw state
3135  * @state: atomic state
3136  * @stall: stall for preceding commits
3137  *
3138  * This function stores the atomic state into the current state pointers in all
3139  * driver objects. It should be called after all failing steps have been done
3140  * and succeeded, but before the actual hardware state is committed.
3141  *
3142  * For cleanup and error recovery the current state for all changed objects will
3143  * be swapped into @state.
3144  *
3145  * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
3146  *
3147  * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
3148  *
3149  * 2. Do any other steps that might fail.
3150  *
3151  * 3. Put the staged state into the current state pointers with this function.
3152  *
3153  * 4. Actually commit the hardware state.
3154  *
3155  * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
3156  * contains the old state. Also do any other cleanup required with that state.
3157  *
3158  * @stall must be set when nonblocking commits for this driver directly access
3159  * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
3160  * the current atomic helpers this is almost always the case, since the helpers
3161  * don't pass the right state structures to the callbacks.
3162  *
3163  * Returns:
3164  * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
3165  * waiting for the previous commits has been interrupted.
3166  */
3167 int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
3168 				  bool stall)
3169 {
3170 	int i, ret;
3171 	unsigned long flags = 0;
3172 	struct drm_connector *connector;
3173 	struct drm_connector_state *old_conn_state, *new_conn_state;
3174 	struct drm_crtc *crtc;
3175 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
3176 	struct drm_plane *plane;
3177 	struct drm_plane_state *old_plane_state, *new_plane_state;
3178 	struct drm_crtc_commit *commit;
3179 	struct drm_private_obj *obj;
3180 	struct drm_private_state *old_obj_state, *new_obj_state;
3181 
3182 	if (stall) {
3183 		/*
3184 		 * We have to stall for hw_done here before
3185 		 * drm_atomic_helper_wait_for_dependencies() because flip
3186 		 * depth > 1 is not yet supported by all drivers. As long as
3187 		 * obj->state is directly dereferenced anywhere in the drivers
3188 		 * atomic_commit_tail function, then it's unsafe to swap state
3189 		 * before drm_atomic_helper_commit_hw_done() is called.
3190 		 */
3191 
3192 		for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
3193 			commit = old_crtc_state->commit;
3194 
3195 			if (!commit)
3196 				continue;
3197 
3198 			ret = wait_for_completion_interruptible(&commit->hw_done);
3199 			if (ret)
3200 				return ret;
3201 		}
3202 
3203 		for_each_old_connector_in_state(state, connector, old_conn_state, i) {
3204 			commit = old_conn_state->commit;
3205 
3206 			if (!commit)
3207 				continue;
3208 
3209 			ret = wait_for_completion_interruptible(&commit->hw_done);
3210 			if (ret)
3211 				return ret;
3212 		}
3213 
3214 		for_each_old_plane_in_state(state, plane, old_plane_state, i) {
3215 			commit = old_plane_state->commit;
3216 
3217 			if (!commit)
3218 				continue;
3219 
3220 			ret = wait_for_completion_interruptible(&commit->hw_done);
3221 			if (ret)
3222 				return ret;
3223 		}
3224 	}
3225 
3226 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
3227 		WARN_ON(connector->state != old_conn_state);
3228 
3229 		old_conn_state->state = state;
3230 		new_conn_state->state = NULL;
3231 
3232 		state->connectors[i].state = old_conn_state;
3233 		connector->state = new_conn_state;
3234 	}
3235 
3236 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3237 		WARN_ON(crtc->state != old_crtc_state);
3238 
3239 		old_crtc_state->state = state;
3240 		new_crtc_state->state = NULL;
3241 
3242 		state->crtcs[i].state = old_crtc_state;
3243 		crtc->state = new_crtc_state;
3244 
3245 		if (new_crtc_state->commit) {
3246 			spin_lock(&crtc->commit_lock);
3247 			list_add(&new_crtc_state->commit->commit_entry,
3248 				 &crtc->commit_list);
3249 			spin_unlock(&crtc->commit_lock);
3250 
3251 			new_crtc_state->commit->event = NULL;
3252 		}
3253 	}
3254 
3255 	drm_panic_lock(state->dev, flags);
3256 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
3257 		WARN_ON(plane->state != old_plane_state);
3258 
3259 		old_plane_state->state = state;
3260 		new_plane_state->state = NULL;
3261 
3262 		state->planes[i].state = old_plane_state;
3263 		plane->state = new_plane_state;
3264 	}
3265 	drm_panic_unlock(state->dev, flags);
3266 
3267 	for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
3268 		WARN_ON(obj->state != old_obj_state);
3269 
3270 		old_obj_state->state = state;
3271 		new_obj_state->state = NULL;
3272 
3273 		state->private_objs[i].state = old_obj_state;
3274 		obj->state = new_obj_state;
3275 	}
3276 
3277 	return 0;
3278 }
3279 EXPORT_SYMBOL(drm_atomic_helper_swap_state);
3280 
3281 /**
3282  * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
3283  * @plane: plane object to update
3284  * @crtc: owning CRTC of owning plane
3285  * @fb: framebuffer to flip onto plane
3286  * @crtc_x: x offset of primary plane on @crtc
3287  * @crtc_y: y offset of primary plane on @crtc
3288  * @crtc_w: width of primary plane rectangle on @crtc
3289  * @crtc_h: height of primary plane rectangle on @crtc
3290  * @src_x: x offset of @fb for panning
3291  * @src_y: y offset of @fb for panning
3292  * @src_w: width of source rectangle in @fb
3293  * @src_h: height of source rectangle in @fb
3294  * @ctx: lock acquire context
3295  *
3296  * Provides a default plane update handler using the atomic driver interface.
3297  *
3298  * RETURNS:
3299  * Zero on success, error code on failure
3300  */
3301 int drm_atomic_helper_update_plane(struct drm_plane *plane,
3302 				   struct drm_crtc *crtc,
3303 				   struct drm_framebuffer *fb,
3304 				   int crtc_x, int crtc_y,
3305 				   unsigned int crtc_w, unsigned int crtc_h,
3306 				   uint32_t src_x, uint32_t src_y,
3307 				   uint32_t src_w, uint32_t src_h,
3308 				   struct drm_modeset_acquire_ctx *ctx)
3309 {
3310 	struct drm_atomic_state *state;
3311 	struct drm_plane_state *plane_state;
3312 	int ret = 0;
3313 
3314 	state = drm_atomic_state_alloc(plane->dev);
3315 	if (!state)
3316 		return -ENOMEM;
3317 
3318 	state->acquire_ctx = ctx;
3319 	plane_state = drm_atomic_get_plane_state(state, plane);
3320 	if (IS_ERR(plane_state)) {
3321 		ret = PTR_ERR(plane_state);
3322 		goto fail;
3323 	}
3324 
3325 	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3326 	if (ret != 0)
3327 		goto fail;
3328 	drm_atomic_set_fb_for_plane(plane_state, fb);
3329 	plane_state->crtc_x = crtc_x;
3330 	plane_state->crtc_y = crtc_y;
3331 	plane_state->crtc_w = crtc_w;
3332 	plane_state->crtc_h = crtc_h;
3333 	plane_state->src_x = src_x;
3334 	plane_state->src_y = src_y;
3335 	plane_state->src_w = src_w;
3336 	plane_state->src_h = src_h;
3337 
3338 	if (plane == crtc->cursor)
3339 		state->legacy_cursor_update = true;
3340 
3341 	ret = drm_atomic_commit(state);
3342 fail:
3343 	drm_atomic_state_put(state);
3344 	return ret;
3345 }
3346 EXPORT_SYMBOL(drm_atomic_helper_update_plane);
3347 
3348 /**
3349  * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
3350  * @plane: plane to disable
3351  * @ctx: lock acquire context
3352  *
3353  * Provides a default plane disable handler using the atomic driver interface.
3354  *
3355  * RETURNS:
3356  * Zero on success, error code on failure
3357  */
3358 int drm_atomic_helper_disable_plane(struct drm_plane *plane,
3359 				    struct drm_modeset_acquire_ctx *ctx)
3360 {
3361 	struct drm_atomic_state *state;
3362 	struct drm_plane_state *plane_state;
3363 	int ret = 0;
3364 
3365 	state = drm_atomic_state_alloc(plane->dev);
3366 	if (!state)
3367 		return -ENOMEM;
3368 
3369 	state->acquire_ctx = ctx;
3370 	plane_state = drm_atomic_get_plane_state(state, plane);
3371 	if (IS_ERR(plane_state)) {
3372 		ret = PTR_ERR(plane_state);
3373 		goto fail;
3374 	}
3375 
3376 	if (plane_state->crtc && plane_state->crtc->cursor == plane)
3377 		plane_state->state->legacy_cursor_update = true;
3378 
3379 	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
3380 	if (ret != 0)
3381 		goto fail;
3382 
3383 	ret = drm_atomic_commit(state);
3384 fail:
3385 	drm_atomic_state_put(state);
3386 	return ret;
3387 }
3388 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
3389 
3390 /**
3391  * drm_atomic_helper_set_config - set a new config from userspace
3392  * @set: mode set configuration
3393  * @ctx: lock acquisition context
3394  *
3395  * Provides a default CRTC set_config handler using the atomic driver interface.
3396  *
3397  * NOTE: For backwards compatibility with old userspace this automatically
3398  * resets the "link-status" property to GOOD, to force any link
3399  * re-training. The SETCRTC ioctl does not define whether an update does
3400  * need a full modeset or just a plane update, hence we're allowed to do
3401  * that. See also drm_connector_set_link_status_property().
3402  *
3403  * Returns:
3404  * Returns 0 on success, negative errno numbers on failure.
3405  */
3406 int drm_atomic_helper_set_config(struct drm_mode_set *set,
3407 				 struct drm_modeset_acquire_ctx *ctx)
3408 {
3409 	struct drm_atomic_state *state;
3410 	struct drm_crtc *crtc = set->crtc;
3411 	int ret = 0;
3412 
3413 	state = drm_atomic_state_alloc(crtc->dev);
3414 	if (!state)
3415 		return -ENOMEM;
3416 
3417 	state->acquire_ctx = ctx;
3418 	ret = __drm_atomic_helper_set_config(set, state);
3419 	if (ret != 0)
3420 		goto fail;
3421 
3422 	ret = handle_conflicting_encoders(state, true);
3423 	if (ret)
3424 		goto fail;
3425 
3426 	ret = drm_atomic_commit(state);
3427 
3428 fail:
3429 	drm_atomic_state_put(state);
3430 	return ret;
3431 }
3432 EXPORT_SYMBOL(drm_atomic_helper_set_config);
3433 
3434 /**
3435  * drm_atomic_helper_disable_all - disable all currently active outputs
3436  * @dev: DRM device
3437  * @ctx: lock acquisition context
3438  *
3439  * Loops through all connectors, finding those that aren't turned off and then
3440  * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3441  * that they are connected to.
3442  *
3443  * This is used for example in suspend/resume to disable all currently active
3444  * functions when suspending. If you just want to shut down everything at e.g.
3445  * driver unload, look at drm_atomic_helper_shutdown().
3446  *
3447  * Note that if callers haven't already acquired all modeset locks this might
3448  * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3449  *
3450  * Returns:
3451  * 0 on success or a negative error code on failure.
3452  *
3453  * See also:
3454  * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3455  * drm_atomic_helper_shutdown().
3456  */
3457 int drm_atomic_helper_disable_all(struct drm_device *dev,
3458 				  struct drm_modeset_acquire_ctx *ctx)
3459 {
3460 	struct drm_atomic_state *state;
3461 	struct drm_connector_state *conn_state;
3462 	struct drm_connector *conn;
3463 	struct drm_plane_state *plane_state;
3464 	struct drm_plane *plane;
3465 	struct drm_crtc_state *crtc_state;
3466 	struct drm_crtc *crtc;
3467 	int ret, i;
3468 
3469 	state = drm_atomic_state_alloc(dev);
3470 	if (!state)
3471 		return -ENOMEM;
3472 
3473 	state->acquire_ctx = ctx;
3474 
3475 	drm_for_each_crtc(crtc, dev) {
3476 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
3477 		if (IS_ERR(crtc_state)) {
3478 			ret = PTR_ERR(crtc_state);
3479 			goto free;
3480 		}
3481 
3482 		crtc_state->active = false;
3483 
3484 		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
3485 		if (ret < 0)
3486 			goto free;
3487 
3488 		ret = drm_atomic_add_affected_planes(state, crtc);
3489 		if (ret < 0)
3490 			goto free;
3491 
3492 		ret = drm_atomic_add_affected_connectors(state, crtc);
3493 		if (ret < 0)
3494 			goto free;
3495 	}
3496 
3497 	for_each_new_connector_in_state(state, conn, conn_state, i) {
3498 		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
3499 		if (ret < 0)
3500 			goto free;
3501 	}
3502 
3503 	for_each_new_plane_in_state(state, plane, plane_state, i) {
3504 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3505 		if (ret < 0)
3506 			goto free;
3507 
3508 		drm_atomic_set_fb_for_plane(plane_state, NULL);
3509 	}
3510 
3511 	ret = drm_atomic_commit(state);
3512 free:
3513 	drm_atomic_state_put(state);
3514 	return ret;
3515 }
3516 EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3517 
3518 /**
3519  * drm_atomic_helper_reset_crtc - reset the active outputs of a CRTC
3520  * @crtc: DRM CRTC
3521  * @ctx: lock acquisition context
3522  *
3523  * Reset the active outputs by indicating that connectors have changed.
3524  * This implies a reset of all active components available between the CRTC and
3525  * connectors.
3526  *
3527  * A variant of this function exists with
3528  * drm_bridge_helper_reset_crtc(), dedicated to bridges.
3529  *
3530  * NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
3531  * For drivers which optimize out unnecessary modesets this will result in
3532  * a no-op commit, achieving nothing.
3533  *
3534  * Returns:
3535  * 0 on success or a negative error code on failure.
3536  */
3537 int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
3538 				 struct drm_modeset_acquire_ctx *ctx)
3539 {
3540 	struct drm_atomic_state *state;
3541 	struct drm_crtc_state *crtc_state;
3542 	int ret;
3543 
3544 	state = drm_atomic_state_alloc(crtc->dev);
3545 	if (!state)
3546 		return -ENOMEM;
3547 
3548 	state->acquire_ctx = ctx;
3549 
3550 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
3551 	if (IS_ERR(crtc_state)) {
3552 		ret = PTR_ERR(crtc_state);
3553 		goto out;
3554 	}
3555 
3556 	crtc_state->connectors_changed = true;
3557 
3558 	ret = drm_atomic_commit(state);
3559 out:
3560 	drm_atomic_state_put(state);
3561 
3562 	return ret;
3563 }
3564 EXPORT_SYMBOL(drm_atomic_helper_reset_crtc);
3565 
3566 /**
3567  * drm_atomic_helper_shutdown - shutdown all CRTC
3568  * @dev: DRM device
3569  *
3570  * This shuts down all CRTC, which is useful for driver unloading. Shutdown on
3571  * suspend should instead be handled with drm_atomic_helper_suspend(), since
3572  * that also takes a snapshot of the modeset state to be restored on resume.
3573  *
3574  * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3575  * and it is the atomic version of drm_helper_force_disable_all().
3576  */
3577 void drm_atomic_helper_shutdown(struct drm_device *dev)
3578 {
3579 	struct drm_modeset_acquire_ctx ctx;
3580 	int ret;
3581 
3582 	if (dev == NULL)
3583 		return;
3584 
3585 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
3586 
3587 	ret = drm_atomic_helper_disable_all(dev, &ctx);
3588 	if (ret)
3589 		drm_err(dev,
3590 			"Disabling all crtc's during unload failed with %i\n",
3591 			ret);
3592 
3593 	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
3594 }
3595 EXPORT_SYMBOL(drm_atomic_helper_shutdown);
3596 
3597 /**
3598  * drm_atomic_helper_duplicate_state - duplicate an atomic state object
3599  * @dev: DRM device
3600  * @ctx: lock acquisition context
3601  *
3602  * Makes a copy of the current atomic state by looping over all objects and
3603  * duplicating their respective states. This is used for example by suspend/
3604  * resume support code to save the state prior to suspend such that it can
3605  * be restored upon resume.
3606  *
3607  * Note that this treats atomic state as persistent between save and restore.
3608  * Drivers must make sure that this is possible and won't result in confusion
3609  * or erroneous behaviour.
3610  *
3611  * Note that if callers haven't already acquired all modeset locks this might
3612  * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3613  *
3614  * Returns:
3615  * A pointer to the copy of the atomic state object on success or an
3616  * ERR_PTR()-encoded error code on failure.
3617  *
3618  * See also:
3619  * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
3620  */
3621 struct drm_atomic_state *
3622 drm_atomic_helper_duplicate_state(struct drm_device *dev,
3623 				  struct drm_modeset_acquire_ctx *ctx)
3624 {
3625 	struct drm_atomic_state *state;
3626 	struct drm_connector *conn;
3627 	struct drm_connector_list_iter conn_iter;
3628 	struct drm_plane *plane;
3629 	struct drm_crtc *crtc;
3630 	int err = 0;
3631 
3632 	state = drm_atomic_state_alloc(dev);
3633 	if (!state)
3634 		return ERR_PTR(-ENOMEM);
3635 
3636 	state->acquire_ctx = ctx;
3637 	state->duplicated = true;
3638 
3639 	drm_for_each_crtc(crtc, dev) {
3640 		struct drm_crtc_state *crtc_state;
3641 
3642 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
3643 		if (IS_ERR(crtc_state)) {
3644 			err = PTR_ERR(crtc_state);
3645 			goto free;
3646 		}
3647 	}
3648 
3649 	drm_for_each_plane(plane, dev) {
3650 		struct drm_plane_state *plane_state;
3651 
3652 		plane_state = drm_atomic_get_plane_state(state, plane);
3653 		if (IS_ERR(plane_state)) {
3654 			err = PTR_ERR(plane_state);
3655 			goto free;
3656 		}
3657 	}
3658 
3659 	drm_connector_list_iter_begin(dev, &conn_iter);
3660 	drm_for_each_connector_iter(conn, &conn_iter) {
3661 		struct drm_connector_state *conn_state;
3662 
3663 		conn_state = drm_atomic_get_connector_state(state, conn);
3664 		if (IS_ERR(conn_state)) {
3665 			err = PTR_ERR(conn_state);
3666 			drm_connector_list_iter_end(&conn_iter);
3667 			goto free;
3668 		}
3669 	}
3670 	drm_connector_list_iter_end(&conn_iter);
3671 
3672 	/* clear the acquire context so that it isn't accidentally reused */
3673 	state->acquire_ctx = NULL;
3674 
3675 free:
3676 	if (err < 0) {
3677 		drm_atomic_state_put(state);
3678 		state = ERR_PTR(err);
3679 	}
3680 
3681 	return state;
3682 }
3683 EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
3684 
3685 /**
3686  * drm_atomic_helper_suspend - subsystem-level suspend helper
3687  * @dev: DRM device
3688  *
3689  * Duplicates the current atomic state, disables all active outputs and then
3690  * returns a pointer to the original atomic state to the caller. Drivers can
3691  * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
3692  * restore the output configuration that was active at the time the system
3693  * entered suspend.
3694  *
3695  * Note that it is potentially unsafe to use this. The atomic state object
3696  * returned by this function is assumed to be persistent. Drivers must ensure
3697  * that this holds true. Before calling this function, drivers must make sure
3698  * to suspend fbdev emulation so that nothing can be using the device.
3699  *
3700  * Returns:
3701  * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
3702  * encoded error code on failure. Drivers should store the returned atomic
3703  * state object and pass it to the drm_atomic_helper_resume() helper upon
3704  * resume.
3705  *
3706  * See also:
3707  * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
3708  * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
3709  */
3710 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
3711 {
3712 	struct drm_modeset_acquire_ctx ctx;
3713 	struct drm_atomic_state *state;
3714 	int err;
3715 
3716 	/* This can never be returned, but it makes the compiler happy */
3717 	state = ERR_PTR(-EINVAL);
3718 
3719 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3720 
3721 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
3722 	if (IS_ERR(state))
3723 		goto unlock;
3724 
3725 	err = drm_atomic_helper_disable_all(dev, &ctx);
3726 	if (err < 0) {
3727 		drm_atomic_state_put(state);
3728 		state = ERR_PTR(err);
3729 		goto unlock;
3730 	}
3731 
3732 unlock:
3733 	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3734 	if (err)
3735 		return ERR_PTR(err);
3736 
3737 	return state;
3738 }
3739 EXPORT_SYMBOL(drm_atomic_helper_suspend);
3740 
3741 /**
3742  * drm_atomic_helper_commit_duplicated_state - commit duplicated state
3743  * @state: duplicated atomic state to commit
3744  * @ctx: pointer to acquire_ctx to use for commit.
3745  *
3746  * The state returned by drm_atomic_helper_duplicate_state() and
3747  * drm_atomic_helper_suspend() is partially invalid, and needs to
3748  * be fixed up before commit.
3749  *
3750  * Returns:
3751  * 0 on success or a negative error code on failure.
3752  *
3753  * See also:
3754  * drm_atomic_helper_suspend()
3755  */
3756 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3757 					      struct drm_modeset_acquire_ctx *ctx)
3758 {
3759 	int i, ret;
3760 	struct drm_plane *plane;
3761 	struct drm_plane_state *new_plane_state;
3762 	struct drm_connector *connector;
3763 	struct drm_connector_state *new_conn_state;
3764 	struct drm_crtc *crtc;
3765 	struct drm_crtc_state *new_crtc_state;
3766 
3767 	state->acquire_ctx = ctx;
3768 
3769 	for_each_new_plane_in_state(state, plane, new_plane_state, i)
3770 		state->planes[i].old_state = plane->state;
3771 
3772 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
3773 		state->crtcs[i].old_state = crtc->state;
3774 
3775 	for_each_new_connector_in_state(state, connector, new_conn_state, i)
3776 		state->connectors[i].old_state = connector->state;
3777 
3778 	ret = drm_atomic_commit(state);
3779 
3780 	state->acquire_ctx = NULL;
3781 
3782 	return ret;
3783 }
3784 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
3785 
3786 /**
3787  * drm_atomic_helper_resume - subsystem-level resume helper
3788  * @dev: DRM device
3789  * @state: atomic state to resume to
3790  *
3791  * Calls drm_mode_config_reset() to synchronize hardware and software states,
3792  * grabs all modeset locks and commits the atomic state object. This can be
3793  * used in conjunction with the drm_atomic_helper_suspend() helper to
3794  * implement suspend/resume for drivers that support atomic mode-setting.
3795  *
3796  * Returns:
3797  * 0 on success or a negative error code on failure.
3798  *
3799  * See also:
3800  * drm_atomic_helper_suspend()
3801  */
3802 int drm_atomic_helper_resume(struct drm_device *dev,
3803 			     struct drm_atomic_state *state)
3804 {
3805 	struct drm_modeset_acquire_ctx ctx;
3806 	int err;
3807 
3808 	drm_mode_config_reset(dev);
3809 
3810 	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3811 
3812 	err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
3813 
3814 	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3815 	drm_atomic_state_put(state);
3816 
3817 	return err;
3818 }
3819 EXPORT_SYMBOL(drm_atomic_helper_resume);
3820 
3821 static int page_flip_common(struct drm_atomic_state *state,
3822 			    struct drm_crtc *crtc,
3823 			    struct drm_framebuffer *fb,
3824 			    struct drm_pending_vblank_event *event,
3825 			    uint32_t flags)
3826 {
3827 	struct drm_plane *plane = crtc->primary;
3828 	struct drm_plane_state *plane_state;
3829 	struct drm_crtc_state *crtc_state;
3830 	int ret = 0;
3831 
3832 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
3833 	if (IS_ERR(crtc_state))
3834 		return PTR_ERR(crtc_state);
3835 
3836 	crtc_state->event = event;
3837 	crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;
3838 
3839 	plane_state = drm_atomic_get_plane_state(state, plane);
3840 	if (IS_ERR(plane_state))
3841 		return PTR_ERR(plane_state);
3842 
3843 	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3844 	if (ret != 0)
3845 		return ret;
3846 	drm_atomic_set_fb_for_plane(plane_state, fb);
3847 
3848 	/* Make sure we don't accidentally do a full modeset. */
3849 	state->allow_modeset = false;
3850 	if (!crtc_state->active) {
3851 		drm_dbg_atomic(crtc->dev,
3852 			       "[CRTC:%d:%s] disabled, rejecting legacy flip\n",
3853 			       crtc->base.id, crtc->name);
3854 		return -EINVAL;
3855 	}
3856 
3857 	return ret;
3858 }
3859 
3860 /**
3861  * drm_atomic_helper_page_flip - execute a legacy page flip
3862  * @crtc: DRM CRTC
3863  * @fb: DRM framebuffer
3864  * @event: optional DRM event to signal upon completion
3865  * @flags: flip flags for non-vblank sync'ed updates
3866  * @ctx: lock acquisition context
3867  *
3868  * Provides a default &drm_crtc_funcs.page_flip implementation
3869  * using the atomic driver interface.
3870  *
3871  * Returns:
3872  * Returns 0 on success, negative errno numbers on failure.
3873  *
3874  * See also:
3875  * drm_atomic_helper_page_flip_target()
3876  */
3877 int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
3878 				struct drm_framebuffer *fb,
3879 				struct drm_pending_vblank_event *event,
3880 				uint32_t flags,
3881 				struct drm_modeset_acquire_ctx *ctx)
3882 {
3883 	struct drm_plane *plane = crtc->primary;
3884 	struct drm_atomic_state *state;
3885 	int ret = 0;
3886 
3887 	state = drm_atomic_state_alloc(plane->dev);
3888 	if (!state)
3889 		return -ENOMEM;
3890 
3891 	state->acquire_ctx = ctx;
3892 
3893 	ret = page_flip_common(state, crtc, fb, event, flags);
3894 	if (ret != 0)
3895 		goto fail;
3896 
3897 	ret = drm_atomic_nonblocking_commit(state);
3898 fail:
3899 	drm_atomic_state_put(state);
3900 	return ret;
3901 }
3902 EXPORT_SYMBOL(drm_atomic_helper_page_flip);
3903 
3904 /**
3905  * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
3906  * @crtc: DRM CRTC
3907  * @fb: DRM framebuffer
3908  * @event: optional DRM event to signal upon completion
3909  * @flags: flip flags for non-vblank sync'ed updates
3910  * @target: specifying the target vblank period when the flip to take effect
3911  * @ctx: lock acquisition context
3912  *
3913  * Provides a default &drm_crtc_funcs.page_flip_target implementation.
3914  * Similar to drm_atomic_helper_page_flip() with extra parameter to specify
3915  * target vblank period to flip.
3916  *
3917  * Returns:
3918  * Returns 0 on success, negative errno numbers on failure.
3919  */
3920 int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
3921 				       struct drm_framebuffer *fb,
3922 				       struct drm_pending_vblank_event *event,
3923 				       uint32_t flags,
3924 				       uint32_t target,
3925 				       struct drm_modeset_acquire_ctx *ctx)
3926 {
3927 	struct drm_plane *plane = crtc->primary;
3928 	struct drm_atomic_state *state;
3929 	struct drm_crtc_state *crtc_state;
3930 	int ret = 0;
3931 
3932 	state = drm_atomic_state_alloc(plane->dev);
3933 	if (!state)
3934 		return -ENOMEM;
3935 
3936 	state->acquire_ctx = ctx;
3937 
3938 	ret = page_flip_common(state, crtc, fb, event, flags);
3939 	if (ret != 0)
3940 		goto fail;
3941 
3942 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3943 	if (WARN_ON(!crtc_state)) {
3944 		ret = -EINVAL;
3945 		goto fail;
3946 	}
3947 	crtc_state->target_vblank = target;
3948 
3949 	ret = drm_atomic_nonblocking_commit(state);
3950 fail:
3951 	drm_atomic_state_put(state);
3952 	return ret;
3953 }
3954 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
3955 
3956 /**
3957  * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
3958  *						  the input end of a bridge
3959  * @bridge: bridge control structure
3960  * @bridge_state: new bridge state
3961  * @crtc_state: new CRTC state
3962  * @conn_state: new connector state
3963  * @output_fmt: tested output bus format
3964  * @num_input_fmts: will contain the size of the returned array
3965  *
3966  * This helper is a pluggable implementation of the
3967  * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
3968  * modify the bus configuration between their input and their output. It
3969  * returns an array of input formats with a single element set to @output_fmt.
3970  *
3971  * RETURNS:
3972  * a valid format array of size @num_input_fmts, or NULL if the allocation
3973  * failed
3974  */
3975 u32 *
3976 drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
3977 					struct drm_bridge_state *bridge_state,
3978 					struct drm_crtc_state *crtc_state,
3979 					struct drm_connector_state *conn_state,
3980 					u32 output_fmt,
3981 					unsigned int *num_input_fmts)
3982 {
3983 	u32 *input_fmts;
3984 
3985 	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
3986 	if (!input_fmts) {
3987 		*num_input_fmts = 0;
3988 		return NULL;
3989 	}
3990 
3991 	*num_input_fmts = 1;
3992 	input_fmts[0] = output_fmt;
3993 	return input_fmts;
3994 }
3995 EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
3996