// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/display/drm_dp_tunnel.h>

#include "intel_atomic.h"
#include "intel_display_core.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_link_bw.h"

struct intel_dp_tunnel_inherited_state {
	struct drm_dp_tunnel_ref ref[I915_MAX_PIPES];
};

/**
 * intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Disconnect a DP tunnel from @intel_dp, destroying any related state. This
 * should be called after detecting a sink-disconnect event from the port.
 */
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp)
{
	drm_dp_tunnel_destroy(intel_dp->tunnel);
	intel_dp->tunnel = NULL;
}

/**
 * intel_dp_tunnel_destroy - Destroy a DP tunnel
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Destroy a DP tunnel connected to @intel_dp, after disabling the BW
 * allocation mode on the tunnel. This should be called while destroying the
 * port.
 */
void intel_dp_tunnel_destroy(struct intel_dp *intel_dp)
{
	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp_tunnel_disconnect(intel_dp);
}

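/*
 * Convert a BW value in kB/s units - as used by the DP tunnel interfaces -
 * to Mb/s for logging, rounding up.
 */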
static int kbytes_to_mbits(int kbytes)
{
	return DIV_ROUND_UP(kbytes * 8, 1000);
}

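/*
 * Get the maximum data rate of the current link configuration, setting
 * @below_dprx_bw if this rate is limited - for instance by the tunnel BW -
 * below what the DPRX itself could handle, in which case a BW change may
 * also change the list of supported modes.
 */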
static int get_current_link_bw(struct intel_dp *intel_dp,
			       bool *below_dprx_bw)
{
	int rate = intel_dp_max_common_rate(intel_dp);
	int lane_count = intel_dp_max_common_lane_count(intel_dp);
	int bw;

	bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
	*below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count);

	return bw;
}

static int update_tunnel_state(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool old_bw_below_dprx;
	bool new_bw_below_dprx;
	int old_bw;
	int new_bw;
	int ret;

	old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);

	ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		return ret;
	}

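	/*
	 * drm_dp_tunnel_update_state() returns a positive value if the
	 * tunnel's state changed, zero if it didn't. Nothing more to do in
	 * the latter case, or if the BW allocation mode is not enabled.
	 */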
	if (ret == 0 ||
	    !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
		return 0;

	intel_dp_update_sink_caps(intel_dp);

	new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);

	/* Suppress the notification if the BW change can't affect the mode list. */
	if (old_bw_below_dprx == new_bw_below_dprx &&
	    !new_bw_below_dprx)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    encoder->base.base.id, encoder->base.name,
		    kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));

	return 1;
}

/*
 * Allocate the BW for a tunnel on a DP connector/port if the connector/port
 * was already active when detecting the tunnel. The allocated BW must be
 * freed by the next atomic modeset, which stores the BW in the
 * intel_atomic_state::inherited_dp_tunnels state and calls
 * intel_dp_tunnel_atomic_free_bw().
 */
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc;
	int tunnel_bw = 0;
	int err;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int stream_bw = intel_dp_config_required_rate(crtc_state);

		tunnel_bw += stream_bw;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    crtc->base.base.id, crtc->base.name,
			    crtc->pipe,
			    kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw));
	}

	err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
	if (err) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(err));

		return err;
	}

	return update_tunnel_state(intel_dp);
}

static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx)
{
	u8 pipe_mask;
	int err;

	err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (err)
		return err;

	return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
}

static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_dp_tunnel *tunnel;
	int ret;

	tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
				      &intel_dp->aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);

	intel_dp->tunnel = tunnel;

	ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (ret) {
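		/*
		 * The tunnel not supporting the BW allocation mode is not an
		 * error; the tunnel is kept, just with BWA disabled.
		 */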
		if (ret == -EOPNOTSUPP)
			return 0;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		/* Keep the tunnel with BWA disabled */
		return 0;
	}

	ret = allocate_initial_tunnel_bw(intel_dp, ctx);
	if (ret < 0)
		intel_dp_tunnel_destroy(intel_dp);

	return ret;
}

/**
 * intel_dp_tunnel_detect - Detect a DP tunnel on a port
 * @intel_dp: DP port object
 * @ctx: lock context acquired by the connector detection handler
 *
 * Detect a DP tunnel on the @intel_dp port, enabling the BW allocation mode
 * on it if supported and allocating the BW required on an already active port.
 * The BW allocated this way must be freed by the next atomic modeset calling
 * intel_dp_tunnel_atomic_free_bw().
 *
 * If @intel_dp already has a tunnel detected on it, update the tunnel's state
 * with regard to its support for the BW allocation mode and the BW available
 * via the tunnel. If the change in the tunnel's state requires this - for
 * instance because the tunnel's group ID has changed - the tunnel will be
 * dropped and recreated.
 *
 * Returns 0 in case of success - after any tunnel detected and added to
 * @intel_dp - or 1 in case the BW on an already existing tunnel has changed
 * in a way that requires notifying user space. Returns a negative error code
 * otherwise.
 */
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	if (intel_dp_is_edp(intel_dp))
		return 0;

	if (intel_dp->tunnel) {
		ret = update_tunnel_state(intel_dp);
		if (ret >= 0)
			return ret;

		/* Try to recreate the tunnel after an update error. */
		intel_dp_tunnel_destroy(intel_dp);
	}

	return detect_new_tunnel(intel_dp, ctx);
}

/**
 * intel_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation support on a tunnel
 * @intel_dp: DP port object
 *
 * Query whether a DP tunnel is connected on @intel_dp and the BW allocation
 * mode is enabled on this tunnel.
 *
 * Returns %true if the BW allocation mode is enabled on @intel_dp.
 */
bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
{
	return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel);
}

/**
 * intel_dp_tunnel_suspend - Suspend a DP tunnel connected on a port
 * @intel_dp: DP port object
 *
 * Suspend a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp->tunnel_suspended = true;
}

/**
 * intel_dp_tunnel_resume - Resume a DP tunnel connected on a port
 * @intel_dp: DP port object
 * @crtc_state: CRTC state
 * @dpcd_updated: the DPCD DPRX capabilities got updated during resume
 *
 * Resume a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool dpcd_updated)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 pipe_mask;
	int err = 0;

	if (!intel_dp->tunnel_suspended)
		return;

	intel_dp->tunnel_suspended = false;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	/*
	 * The TBT Connection Manager requires the GFX driver to read out
	 * the sink's DPRX caps to be able to service any BW requests later.
	 * To avoid overriding the caps in @intel_dp cached before suspend,
	 * do only a dummy read here, unless the capabilities were already
	 * updated during resume.
	 */
	if (!dpcd_updated) {
		err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err) {
			drm_dp_tunnel_set_io_error(intel_dp->tunnel);
			goto out_err;
		}
	}

	err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (err)
		goto out_err;

	pipe_mask = 0;
	if (crtc_state) {
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

		/* TODO: Add support for MST */
		pipe_mask |= BIT(crtc->pipe);
	}

	err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
	if (err < 0)
		goto out_err;

	return;

out_err:
	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    ERR_PTR(err));
}

static struct drm_dp_tunnel *
get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	if (!state->inherited_dp_tunnels)
		return NULL;

	return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
}

static int
add_inherited_tunnel(struct intel_atomic_state *state,
		     struct drm_dp_tunnel *tunnel,
		     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_dp_tunnel *old_tunnel;

	old_tunnel = get_inherited_tunnel(state, crtc);
	if (old_tunnel) {
		drm_WARN_ON(display->drm, old_tunnel != tunnel);
		return 0;
	}

	if (!state->inherited_dp_tunnels) {
		state->inherited_dp_tunnels = kzalloc(sizeof(*state->inherited_dp_tunnels),
						      GFP_KERNEL);
		if (!state->inherited_dp_tunnels)
			return -ENOMEM;
	}

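	/*
	 * The reference taken here is released by
	 * intel_dp_tunnel_atomic_cleanup_inherited_state(), once @state is
	 * no longer needed.
	 */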
	drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);

	return 0;
}

static int check_inherited_tunnel_state(struct intel_atomic_state *state,
					struct intel_dp *intel_dp,
					const struct intel_digital_connector_state *old_conn_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->base.connector);
	struct intel_crtc *old_crtc;
	const struct intel_crtc_state *old_crtc_state;

	/*
	 * If a BWA tunnel gets detected only after the corresponding
	 * connector got enabled already - either without a BWA tunnel, or
	 * with a different BWA tunnel which was removed meanwhile - the old
	 * CRTC state won't contain the state of the current tunnel. Such a
	 * tunnel still has BW reserved for it, which needs to be released,
	 * so add the state for these inherited tunnels separately, only to
	 * this atomic state.
	 */
	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	if (!old_conn_state->base.crtc)
		return 0;

	old_crtc = to_intel_crtc(old_conn_state->base.crtc);
	old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);

	if (!old_crtc_state->hw.active ||
	    old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    old_crtc->base.base.id, old_crtc->base.name,
		    intel_dp->tunnel);

	return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
}

/**
 * intel_dp_tunnel_atomic_cleanup_inherited_state - Free any inherited DP tunnel state
 * @state: Atomic state
 *
 * Free the inherited DP tunnel state in @state.
 */
void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	enum pipe pipe;

	if (!state->inherited_dp_tunnels)
		return;

	for_each_pipe(display, pipe)
		if (state->inherited_dp_tunnels->ref[pipe].tunnel)
			drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);

	kfree(state->inherited_dp_tunnels);
	state->inherited_dp_tunnels = NULL;
}

static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
						  struct drm_dp_tunnel *tunnel)
{
	struct intel_display *display = to_intel_display(state);
	u32 pipe_mask;
	int err;

	err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
							      tunnel, &pipe_mask);
	if (err)
		return err;

	drm_WARN_ON(display->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));

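	/*
	 * Streams in the same tunnel group share the group's BW, so changing
	 * one stream's BW may require reconfiguring all the other streams in
	 * the group; modeset all the pipes driving them.
	 */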
	return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}

/**
 * intel_dp_tunnel_atomic_add_state_for_crtc - Add CRTC specific DP tunnel state
 * @state: Atomic state
 * @crtc: CRTC to add the tunnel state for
 *
 * Add the DP tunnel state for @crtc if the CRTC (aka DP tunnel stream) is enabled
 * via a DP tunnel.
 *
 * Return 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
					      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_dp_tunnel_state *tunnel_state;
	struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;

	if (!tunnel)
		return 0;

	tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
	if (IS_ERR(tunnel_state))
		return PTR_ERR(tunnel_state);

	return 0;
}

static int check_group_state(struct intel_atomic_state *state,
			     struct intel_dp *intel_dp,
			     struct intel_connector *connector,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->dp_tunnel_ref.tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc_state->dp_tunnel_ref.tunnel);

	return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel);
}

/**
 * intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel specific state
 * @state: Atomic state
 * @intel_dp: DP port object
 * @connector: connector using @intel_dp
 *
 * Check and add the DP tunnel atomic state for @intel_dp/@connector to
 * @state, if there is a DP tunnel detected on @intel_dp with BW allocation
 * mode enabled on it, or if @intel_dp/@connector was previously enabled via a
 * DP tunnel.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
				       struct intel_dp *intel_dp,
				       struct intel_connector *connector)
{
	const struct intel_digital_connector_state *old_conn_state =
		intel_atomic_get_old_connector_state(state, connector);
	const struct intel_digital_connector_state *new_conn_state =
		intel_atomic_get_new_connector_state(state, connector);
	int err;

	if (old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(old_conn_state->base.crtc));
		if (err)
			return err;
	}

	if (new_conn_state->base.crtc &&
	    new_conn_state->base.crtc != old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(new_conn_state->base.crtc));
		if (err)
			return err;
	}

	return check_inherited_tunnel_state(state, intel_dp, old_conn_state);
}

/**
 * intel_dp_tunnel_atomic_compute_stream_bw - Compute the BW required by a DP tunnel stream
 * @state: Atomic state
 * @intel_dp: DP object
 * @connector: connector using @intel_dp
 * @crtc_state: state of CRTC of the given DP tunnel stream
 *
 * Compute the required BW of CRTC (aka DP tunnel stream), storing this BW in
 * the DP tunnel state containing the stream in @state. Before re-calculating
 * the BW requirement in @crtc_state, the old BW requirement computed by this
 * function must be cleared by calling intel_dp_tunnel_atomic_clear_stream_bw().
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
					     struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int required_rate = intel_dp_config_required_rate(crtc_state);
	int ret;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc->pipe,
		    kbytes_to_mbits(required_rate));

	ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
						 crtc->pipe, required_rate);
	if (ret < 0)
		return ret;

	drm_dp_tunnel_ref_get(intel_dp->tunnel,
			      &crtc_state->dp_tunnel_ref);

	return 0;
}

/**
 * intel_dp_tunnel_atomic_clear_stream_bw - Clear any DP tunnel stream BW requirement
 * @state: Atomic state
 * @crtc_state: state of CRTC of the given DP tunnel stream
 *
 * Clear any DP tunnel stream BW requirement set by
 * intel_dp_tunnel_atomic_compute_stream_bw().
 */
void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
					    struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->dp_tunnel_ref.tunnel)
		return;

	drm_dp_tunnel_atomic_set_stream_bw(&state->base,
					   crtc_state->dp_tunnel_ref.tunnel,
					   crtc->pipe, 0);
	drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
}

/**
 * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all DP tunnels in @state. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs in
 *     @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
				      struct intel_link_bw_limits *limits)
{
	u32 failed_stream_mask;
	int err;

	err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
						    &failed_stream_mask);
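	/*
	 * Only a BW overage (-ENOSPC) is handled here, by reducing the BPP
	 * of the streams on the affected tunnels; a valid configuration or
	 * any other error is returned as-is.
	 */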
	if (err != -ENOSPC)
		return err;

	err = intel_link_bw_reduce_bpp(state, limits,
				       failed_stream_mask, "DP tunnel link BW");

	return err ? : -EAGAIN;
}

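/*
 * Decrease the BW allocation of all the tunnels with a reduced BW
 * requirement first, so that the BW freed up this way can be used by
 * atomic_increase_bw() for tunnels with an increased BW requirement.
 */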
static void atomic_decrease_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_dp_tunnel_state *new_tunnel_state;
		struct drm_dp_tunnel *tunnel;
		int old_bw;
		int new_bw;

		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		tunnel = get_inherited_tunnel(state, crtc);
		if (!tunnel)
			tunnel = old_crtc_state->dp_tunnel_ref.tunnel;

		if (!tunnel)
			continue;

		old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);

		new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
		new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);

		if (new_bw >= old_bw)
			continue;

		drm_dp_tunnel_alloc_bw(tunnel, new_bw);
	}
}

static void queue_retry_work(struct intel_atomic_state *state,
			     struct drm_dp_tunnel *tunnel,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder;

	encoder = intel_get_crtc_new_encoder(state, crtc_state);

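	/*
	 * A BW allocation failure is expected if the sink got disconnected
	 * meanwhile; retrying the modeset would be pointless in that case.
	 */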
	if (!intel_digital_port_connected(encoder))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
		    drm_dp_tunnel_name(tunnel),
		    encoder->base.base.id,
		    encoder->base.name);

	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
}

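/*
 * Increase the BW allocation of all the tunnels with an increased BW
 * requirement, using the BW freed up by atomic_decrease_bw(). A failed
 * allocation on a still connected sink triggers a modeset retry.
 */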
static void atomic_increase_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_dp_tunnel_state *tunnel_state;
		struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
		int bw;

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		if (!tunnel)
			continue;

		tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);

		bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);

		if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
			queue_retry_work(state, tunnel, crtc_state);
	}
}

/**
 * intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
 * @state: Atomic state
 *
 * Allocate the required BW for all tunnels in @state.
 */
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
{
	atomic_decrease_bw(state);
	atomic_increase_bw(state);
}

/**
 * intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
 * @display: display device
 *
 * Initialize the DP tunnel manager. The tunnel manager will support the
 * detection/management of DP tunnels on all DP connectors, so the function
 * must be called after all these connectors have been registered already.
 *
 * Return 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_mgr_init(struct intel_display *display)
{
	struct drm_dp_tunnel_mgr *tunnel_mgr;
	struct drm_connector_list_iter connector_list_iter;
	struct intel_connector *connector;
	int dp_connectors = 0;

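	/*
	 * Count the DP connectors, sizing the tunnel manager for at most one
	 * tunnel per DP connector.
	 */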
	drm_connector_list_iter_begin(display->drm, &connector_list_iter);
	for_each_intel_connector_iter(connector, &connector_list_iter) {
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		dp_connectors++;
	}
	drm_connector_list_iter_end(&connector_list_iter);

	tunnel_mgr = drm_dp_tunnel_mgr_create(display->drm, dp_connectors);
	if (IS_ERR(tunnel_mgr))
		return PTR_ERR(tunnel_mgr);

	display->dp_tunnel_mgr = tunnel_mgr;

	return 0;
}

/**
 * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
 * @display: display device
 *
 * Clean up the DP tunnel manager state.
 */
void intel_dp_tunnel_mgr_cleanup(struct intel_display *display)
{
	drm_dp_tunnel_mgr_destroy(display->dp_tunnel_mgr);
	display->dp_tunnel_mgr = NULL;
}