// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_print.h>

#include "intel_atomic.h"
#include "intel_display_core.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_link_bw.h"

struct intel_dp_tunnel_inherited_state {
	struct drm_dp_tunnel_ref ref[I915_MAX_PIPES];
};

/**
 * intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Disconnect a DP tunnel from @intel_dp, destroying any related state. This
 * should be called after detecting a sink-disconnect event from the port.
 */
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp)
{
	drm_dp_tunnel_destroy(intel_dp->tunnel);
	intel_dp->tunnel = NULL;
}

/**
 * intel_dp_tunnel_destroy - Destroy a DP tunnel
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Destroy a DP tunnel connected to @intel_dp, after disabling the BW
 * allocation mode on the tunnel. This should be called while destroying the
 * port.
 */
void intel_dp_tunnel_destroy(struct intel_dp *intel_dp)
{
	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp_tunnel_disconnect(intel_dp);
}

static int kbytes_to_mbits(int kbytes)
{
	return DIV_ROUND_UP(kbytes * 8, 1000);
}
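
/*
 * Illustrative note, not part of the original driver code: the BW values in
 * this file are tracked in kB/sec and only converted to Mb/sec for the debug
 * messages. For instance one HBR2 lane carries 5.4 Gb/s on the wire, i.e.
 * 4.32 Gb/s of data after 8b/10b channel coding, which is tracked here as
 * 540000 kB/sec; kbytes_to_mbits(540000) == 4320 Mb/sec.
 */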

static int get_current_link_bw(struct intel_dp *intel_dp)
{
	int rate = intel_dp_max_common_rate(intel_dp);
	int lane_count = intel_dp_max_common_lane_count(intel_dp);

	return intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
}

static int __update_tunnel_state(struct intel_dp *intel_dp, bool force_sink_update)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int ret;

	ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		return ret;
	}

	if (!force_sink_update &&
	    (ret == 0 || !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel)))
		return 0;

	intel_dp_update_sink_caps(intel_dp);

	return 0;
}

static bool has_tunnel_bw_changed(struct intel_dp *intel_dp, int old_bw)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	int new_bw;

	new_bw = get_current_link_bw(intel_dp);

	/* Suppress the notification if the mode list can't change due to bw. */
	if (old_bw == new_bw)
		return false;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    encoder->base.base.id, encoder->base.name,
		    kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));

	return true;
}

/*
 * Returns:
 * - 0 in case of success - if there wasn't any change in the tunnel state
 *   requiring a user notification
 * - 1 in case of success - if there was a change in the tunnel state
 *   requiring a user notification
 * - Negative error code if updating the tunnel state failed
 */
static int update_tunnel_state(struct intel_dp *intel_dp)
{
	int old_bw;
	int err;

	old_bw = get_current_link_bw(intel_dp);

	err = __update_tunnel_state(intel_dp, false);
	if (err)
		return err;

	return has_tunnel_bw_changed(intel_dp, old_bw) ? 1 : 0;
}

/*
 * Allocate the BW for a tunnel on a DP connector/port if the connector/port
 * was already active when detecting the tunnel. The allocated BW must be
 * freed by the next atomic modeset, storing the BW in the
 * intel_atomic_state::inherited_dp_tunnels, and calling
 * intel_dp_tunnel_atomic_free_bw().
 */
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc;
	int tunnel_bw = 0;
	int err;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int stream_bw = intel_dp_config_required_rate(crtc_state);

		tunnel_bw += stream_bw;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    crtc->base.base.id, crtc->base.name,
			    crtc->pipe,
			    kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw));
	}

	err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
	if (err) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(err));
	}

	return err;
}

static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx)
{
	u8 pipe_mask;
	int err;

	err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (err)
		return err;

	return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
}

/*
 * Returns:
 * - 0 in case of success - after any tunnel detected and added to @intel_dp
 * - 1 in case of success - after a tunnel detected and added to @intel_dp,
 *   where the link BW via the tunnel changed in a way requiring a user
 *   notification
 * - Negative error code if the tunnel detection failed
 */
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_dp_tunnel *tunnel;
	int old_bw;
	int ret;

	old_bw = get_current_link_bw(intel_dp);

	tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
				      &intel_dp->aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);

	intel_dp->tunnel = tunnel;

	ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (ret) {
		if (ret == -EOPNOTSUPP)
			return 0;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		/* Keep the tunnel with BWA disabled */
		return 0;
	}

	ret = allocate_initial_tunnel_bw(intel_dp, ctx);
	if (ret < 0) {
		intel_dp_tunnel_destroy(intel_dp);

		return ret;
	}

	ret = __update_tunnel_state(intel_dp, true);
	if (ret)
		return ret;

	return has_tunnel_bw_changed(intel_dp, old_bw) ? 1 : 0;
}

/**
 * intel_dp_tunnel_detect - Detect a DP tunnel on a port
 * @intel_dp: DP port object
 * @ctx: lock context acquired by the connector detection handler
 *
 * Detect a DP tunnel on the @intel_dp port, enabling the BW allocation mode
 * on it if supported and allocating the BW required on an already active port.
 * The BW allocated this way must be freed by the next atomic modeset calling
 * intel_dp_tunnel_atomic_free_bw().
 *
 * If @intel_dp already has a tunnel detected on it, update the tunnel's state
 * wrt. its support for BW allocation mode and the available BW via the
 * tunnel. If the tunnel's state change requires this - for instance the
 * tunnel's group ID has changed - the tunnel will be dropped and recreated.
 *
 * Returns:
 * - 0 in case of success - after any tunnel detected and added to @intel_dp
 * - 1 in case the link BW via the new or an already existing tunnel has changed
 *   in a way that requires notifying user space
 * - Negative error code, if creating a new tunnel or updating the tunnel
 *   state failed
 */
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	if (intel_dp_is_edp(intel_dp))
		return 0;

	if (intel_dp->tunnel) {
		ret = update_tunnel_state(intel_dp);
		if (ret >= 0)
			return ret;

		/* Try to recreate the tunnel after an update error. */
		intel_dp_tunnel_destroy(intel_dp);
	}

	return detect_new_tunnel(intel_dp, ctx);
}
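
/*
 * Illustrative sketch only, not the actual i915 call site: a connector
 * detect handler is expected to consume the tri-state return value above
 * roughly as
 *
 *	ret = intel_dp_tunnel_detect(intel_dp, ctx);
 *	if (ret == -EDEADLK)
 *		return ret;
 *	if (ret == 1)
 *		notify_user_space();
 *
 * where -EDEADLK must be propagated so that the modeset acquire context can
 * back off and retry, any other error means detection continues without a
 * BWA tunnel, and a return value of 1 means the available link BW changed
 * and user space should be notified (the hypothetical notify_user_space()
 * stands in for that, for instance a hotplug/uevent notification).
 */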

/**
 * intel_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation support on a tunnel
 * @intel_dp: DP port object
 *
 * Query whether a DP tunnel is connected on @intel_dp and the tunnel supports
 * the BW allocation mode.
 *
 * Returns %true if the BW allocation mode is supported on @intel_dp.
 */
bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
{
	return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel);
}

/**
 * intel_dp_tunnel_suspend - Suspend a DP tunnel connected on a port
 * @intel_dp: DP port object
 *
 * Suspend a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp->tunnel_suspended = true;
}

/**
 * intel_dp_tunnel_resume - Resume a DP tunnel connected on a port
 * @intel_dp: DP port object
 * @crtc_state: CRTC state
 * @dpcd_updated: the DPCD DPRX capabilities got updated during resume
 *
 * Resume a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool dpcd_updated)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 pipe_mask;
	int err = 0;

	if (!intel_dp->tunnel_suspended)
		return;

	intel_dp->tunnel_suspended = false;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	/*
	 * The TBT Connection Manager requires the GFX driver to read out
	 * the sink's DPRX caps to be able to service any BW requests later.
	 * During resume, avoid overriding the caps cached in @intel_dp before
	 * suspend, so only do a dummy read here, unless the capabilities were
	 * already updated during resume.
	 */
	if (!dpcd_updated) {
		err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err) {
			drm_dp_tunnel_set_io_error(intel_dp->tunnel);
			goto out_err;
		}
	}

	err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (err)
		goto out_err;

	pipe_mask = 0;
	if (crtc_state) {
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

		/* TODO: Add support for MST */
		pipe_mask |= BIT(crtc->pipe);
	}

	err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
	if (err < 0)
		goto out_err;

	return;

out_err:
	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    ERR_PTR(err));
}
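
/*
 * Illustrative sketch only: the expectation is that the system/runtime
 * suspend path calls intel_dp_tunnel_suspend() for each DP port and the
 * matching resume path calls intel_dp_tunnel_resume(), for instance
 *
 *	intel_dp_tunnel_suspend(intel_dp);
 *	...
 *	intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
 *
 * where crtc_state may be NULL if the port has no active CRTC on resume and
 * dpcd_updated tells whether the DPRX caps were already re-read during
 * resume, in which case the dummy DPCD read above is skipped.
 */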

static struct drm_dp_tunnel *
get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	if (!state->inherited_dp_tunnels)
		return NULL;

	return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
}

static int
add_inherited_tunnel(struct intel_atomic_state *state,
		     struct drm_dp_tunnel *tunnel,
		     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_dp_tunnel *old_tunnel;

	old_tunnel = get_inherited_tunnel(state, crtc);
	if (old_tunnel) {
		drm_WARN_ON(display->drm, old_tunnel != tunnel);
		return 0;
	}

	if (!state->inherited_dp_tunnels) {
		state->inherited_dp_tunnels = kzalloc_obj(*state->inherited_dp_tunnels);
		if (!state->inherited_dp_tunnels)
			return -ENOMEM;
	}

	drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);

	return 0;
}

static int check_inherited_tunnel_state(struct intel_atomic_state *state,
					struct intel_dp *intel_dp,
					const struct intel_digital_connector_state *old_conn_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->base.connector);
	struct intel_crtc *old_crtc;
	const struct intel_crtc_state *old_crtc_state;

	/*
	 * If a BWA tunnel gets detected only after the corresponding
	 * connector was already enabled, either without a BWA tunnel or with
	 * a different BWA tunnel that has since been removed, the old CRTC
	 * state won't contain the state of the current tunnel. The current
	 * tunnel still has BW reserved for it, which needs to be released,
	 * so add the state for such inherited tunnels separately, only to
	 * this atomic state.
	 */
	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	if (!old_conn_state->base.crtc)
		return 0;

	old_crtc = to_intel_crtc(old_conn_state->base.crtc);
	old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);

	if (!old_crtc_state->hw.active ||
	    old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    old_crtc->base.base.id, old_crtc->base.name,
		    intel_dp->tunnel);

	return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
}

/**
 * intel_dp_tunnel_atomic_cleanup_inherited_state - Free any inherited DP tunnel state
 * @state: Atomic state
 *
 * Free the inherited DP tunnel state in @state.
 */
void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	enum pipe pipe;

	if (!state->inherited_dp_tunnels)
		return;

	for_each_pipe(display, pipe)
		if (state->inherited_dp_tunnels->ref[pipe].tunnel)
			drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);

	kfree(state->inherited_dp_tunnels);
	state->inherited_dp_tunnels = NULL;
}

static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
						  struct drm_dp_tunnel *tunnel)
{
	struct intel_display *display = to_intel_display(state);
	u32 pipe_mask;
	int err;

	err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
							      tunnel, &pipe_mask);
	if (err)
		return err;

	drm_WARN_ON(display->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));

	return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}

/**
 * intel_dp_tunnel_atomic_add_state_for_crtc - Add CRTC specific DP tunnel state
 * @state: Atomic state
 * @crtc: CRTC to add the tunnel state for
 *
 * Add the DP tunnel state for @crtc if the CRTC (aka DP tunnel stream) is enabled
 * via a DP tunnel.
 *
 * Return 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
					      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_dp_tunnel_state *tunnel_state;
	struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;

	if (!tunnel)
		return 0;

	tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
	if (IS_ERR(tunnel_state))
		return PTR_ERR(tunnel_state);

	return 0;
}

static int check_group_state(struct intel_atomic_state *state,
			     struct intel_dp *intel_dp,
			     struct intel_connector *connector,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->dp_tunnel_ref.tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc_state->dp_tunnel_ref.tunnel);

	return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel);
}

/**
 * intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel specific state
 * @state: Atomic state
 * @intel_dp: DP port object
 * @connector: connector using @intel_dp
 *
 * Check and add the DP tunnel atomic state for @intel_dp/@connector to
 * @state, if there is a DP tunnel detected on @intel_dp with BW allocation
 * mode enabled on it, or if @intel_dp/@connector was previously enabled via a
 * DP tunnel.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
				       struct intel_dp *intel_dp,
				       struct intel_connector *connector)
{
	const struct intel_digital_connector_state *old_conn_state =
		intel_atomic_get_old_connector_state(state, connector);
	const struct intel_digital_connector_state *new_conn_state =
		intel_atomic_get_new_connector_state(state, connector);
	int err;

	if (old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(old_conn_state->base.crtc));
		if (err)
			return err;
	}

	if (new_conn_state->base.crtc &&
	    new_conn_state->base.crtc != old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(new_conn_state->base.crtc));
		if (err)
			return err;
	}

	return check_inherited_tunnel_state(state, intel_dp, old_conn_state);
}
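
/*
 * Illustrative sketch only: the expectation is that the connector's atomic
 * check hook calls this for every DP connector in the atomic state, along
 * the lines of
 *
 *	err = intel_dp_tunnel_atomic_check_state(state, intel_dp, connector);
 *	if (err)
 *		return err;
 *
 * so that the tunnel state covering the connector's old and new CRTC, as
 * well as any inherited tunnel, is part of @state before the per-stream BW
 * is computed below.
 */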

/**
 * intel_dp_tunnel_atomic_compute_stream_bw - Compute the BW required by a DP tunnel stream
 * @state: Atomic state
 * @intel_dp: DP object
 * @connector: connector using @intel_dp
 * @crtc_state: state of CRTC of the given DP tunnel stream
 *
 * Compute the required BW of the CRTC (aka DP tunnel stream), storing this BW
 * to the DP tunnel state containing the stream in @state. Before re-calculating
 * a BW requirement in @crtc_state, the old BW requirement computed by this
 * function must be cleared by calling intel_dp_tunnel_atomic_clear_stream_bw().
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
					     struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int required_rate = intel_dp_config_required_rate(crtc_state);
	int ret;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc->pipe,
		    kbytes_to_mbits(required_rate));

	ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
						 crtc->pipe, required_rate);
	if (ret < 0)
		return ret;

	drm_dp_tunnel_ref_get(intel_dp->tunnel,
			      &crtc_state->dp_tunnel_ref);

	return 0;
}
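
/*
 * Illustrative sketch only: the compute/clear helpers above and below are
 * expected to bracket the recomputation of a CRTC's configuration, roughly
 * as
 *
 *	ret = intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);
 *	if (ret)
 *		return ret;
 *	... recompute crtc_state ...
 *	ret = intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp,
 *						       connector, crtc_state);
 *
 * so that the stream's old BW requirement and tunnel reference are dropped
 * before the new requirement is recorded and the reference is re-taken.
 */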

/**
 * intel_dp_tunnel_atomic_clear_stream_bw - Clear any DP tunnel stream BW requirement
 * @state: Atomic state
 * @crtc_state: state of CRTC of the given DP tunnel stream
 *
 * Clear any DP tunnel stream BW requirement set by
 * intel_dp_tunnel_atomic_compute_stream_bw().
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
					   struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int err;

	if (!crtc_state->dp_tunnel_ref.tunnel)
		return 0;

	err = drm_dp_tunnel_atomic_set_stream_bw(&state->base,
						 crtc_state->dp_tunnel_ref.tunnel,
						 crtc->pipe, 0);
	if (err)
		return err;

	drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);

	return 0;
}

/**
 * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all DP tunnels in @state. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs in
 *     @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
				      struct intel_link_bw_limits *limits)
{
	u32 failed_stream_mask;
	int err;

	err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
						    &failed_stream_mask);
	if (err != -ENOSPC)
		return err;

	err = intel_link_bw_reduce_bpp(state, limits,
				       failed_stream_mask, "DP tunnel link BW");

	return err ? : -EAGAIN;
}
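
/*
 * Illustrative sketch only: the -EAGAIN contract above assumes the global
 * atomic check drives a limit-reduction loop roughly like
 *
 *	for (;;) {
 *		... compute all CRTC states using the current limits ...
 *		ret = intel_dp_tunnel_atomic_check_link(state, &limits);
 *		if (ret != -EAGAIN)
 *			break;
 *	}
 *
 * i.e. every -EAGAIN return re-runs the per-CRTC computation with the
 * reduced bpp limits until the tunnel BW check passes or a hard error is
 * returned.
 */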

static void atomic_decrease_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_dp_tunnel_state *new_tunnel_state;
		struct drm_dp_tunnel *tunnel;
		int old_bw;
		int new_bw;

		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		tunnel = get_inherited_tunnel(state, crtc);
		if (!tunnel)
			tunnel = old_crtc_state->dp_tunnel_ref.tunnel;

		if (!tunnel)
			continue;

		old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);

		new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
		new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);

		if (new_bw >= old_bw)
			continue;

		drm_dp_tunnel_alloc_bw(tunnel, new_bw);
	}
}

static void queue_retry_work(struct intel_atomic_state *state,
			     struct drm_dp_tunnel *tunnel,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder;

	encoder = intel_get_crtc_new_encoder(state, crtc_state);

	if (!intel_digital_port_connected(encoder))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
		    drm_dp_tunnel_name(tunnel),
		    encoder->base.base.id,
		    encoder->base.name);

	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
}

static void atomic_increase_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_dp_tunnel_state *tunnel_state;
		struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
		int bw;

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		if (!tunnel)
			continue;

		tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);

		bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);

		if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
			queue_retry_work(state, tunnel, crtc_state);
	}
}

/**
 * intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
 * @state: Atomic state
 *
 * Allocate the required BW for all tunnels in @state.
 */
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
{
	atomic_decrease_bw(state);
	atomic_increase_bw(state);
}
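
/*
 * Decreasing the allocations before increasing them is presumably what keeps
 * each tunnel group within its available BW during the transition: BW given
 * up by streams that shrink or get disabled is returned to the group first,
 * before streams with a growing requirement try to claim it.
 */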

/**
 * intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
 * @display: display device
 *
 * Initialize the DP tunnel manager. The tunnel manager will support the
 * detection/management of DP tunnels on all DP connectors, so the function
 * must be called after all these connectors have been registered already.
 *
 * Return 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_mgr_init(struct intel_display *display)
{
	struct drm_dp_tunnel_mgr *tunnel_mgr;
	struct drm_connector_list_iter connector_list_iter;
	struct intel_connector *connector;
	int dp_connectors = 0;

	drm_connector_list_iter_begin(display->drm, &connector_list_iter);
	for_each_intel_connector_iter(connector, &connector_list_iter) {
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		dp_connectors++;
	}
	drm_connector_list_iter_end(&connector_list_iter);

	tunnel_mgr = drm_dp_tunnel_mgr_create(display->drm, dp_connectors);
	if (IS_ERR(tunnel_mgr))
		return PTR_ERR(tunnel_mgr);

	display->dp_tunnel_mgr = tunnel_mgr;

	return 0;
}
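
/*
 * Illustrative sketch only: driver init and teardown are expected to pair
 * intel_dp_tunnel_mgr_init() above with intel_dp_tunnel_mgr_cleanup() below,
 * for instance
 *
 *	ret = intel_dp_tunnel_mgr_init(display);
 *	if (ret)
 *		goto err;
 *	...
 *	intel_dp_tunnel_mgr_cleanup(display);
 *
 * with the init call made only after all DP connectors have been registered,
 * since the manager is sized by the DP connector count computed above.
 */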

/**
 * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
 * @display: display device
 *
 * Clean up the DP tunnel manager state.
 */
void intel_dp_tunnel_mgr_cleanup(struct intel_display *display)
{
	drm_dp_tunnel_mgr_destroy(display->dp_tunnel_mgr);
	display->dp_tunnel_mgr = NULL;
}