1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2023 Intel Corporation
4 */
5
6 #include <drm/display/drm_dp_tunnel.h>
7 #include <drm/drm_print.h>
8
9 #include "intel_atomic.h"
10 #include "intel_display_core.h"
11 #include "intel_display_limits.h"
12 #include "intel_display_types.h"
13 #include "intel_dp.h"
14 #include "intel_dp_link_training.h"
15 #include "intel_dp_mst.h"
16 #include "intel_dp_tunnel.h"
17 #include "intel_link_bw.h"
18
/*
 * Per-pipe references on DP tunnels whose BW was allocated outside of an
 * atomic state (e.g. for an already active pipe at tunnel detection time).
 * Freed by intel_dp_tunnel_atomic_cleanup_inherited_state().
 */
struct intel_dp_tunnel_inherited_state {
	/* ref[pipe].tunnel is NULL if the pipe has no inherited tunnel */
	struct drm_dp_tunnel_ref ref[I915_MAX_PIPES];
};
22
/**
 * intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Disconnect a DP tunnel from @intel_dp, destroying any related state. This
 * should be called after detecting a sink-disconnect event from the port.
 */
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp)
{
	drm_dp_tunnel_destroy(intel_dp->tunnel);
	/* Clear the pointer so a later detect starts from scratch. */
	intel_dp->tunnel = NULL;
}
35
/**
 * intel_dp_tunnel_destroy - Destroy a DP tunnel
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Destroy a DP tunnel connected to @intel_dp, after disabling the BW
 * allocation mode on the tunnel. This should be called while destroying the
 * port.
 */
void intel_dp_tunnel_destroy(struct intel_dp *intel_dp)
{
	/* BW allocation mode must be disabled before dropping the tunnel. */
	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp_tunnel_disconnect(intel_dp);
}
51
/* Convert a BW value from kByte/sec to MBit/sec units, rounding up. */
static int kbytes_to_mbits(int kbytes)
{
	int bits = kbytes * 8;

	/* Open-coded DIV_ROUND_UP(bits, 1000). */
	return (bits + 1000 - 1) / 1000;
}
56
/*
 * Return the maximum data rate usable on the current link, setting
 * @below_dprx_bw to %true if this is limited below what the DPRX (sink)
 * itself could sustain at the same rate/lane-count.
 */
static int get_current_link_bw(struct intel_dp *intel_dp,
			       bool *below_dprx_bw)
{
	int max_rate = intel_dp_max_common_rate(intel_dp);
	int max_lanes = intel_dp_max_common_lane_count(intel_dp);
	int link_bw = intel_dp_max_link_data_rate(intel_dp, max_rate, max_lanes);

	*below_dprx_bw = link_bw < drm_dp_max_dprx_data_rate(max_rate, max_lanes);

	return link_bw;
}
69
/*
 * Refresh the state of an already detected tunnel.
 *
 * Returns a negative error code on failure, 1 if user space must be
 * notified about a BW change and 0 otherwise (no state change, or BW
 * allocation mode is disabled on the tunnel).
 */
static int update_tunnel_state(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool old_bw_below_dprx;
	bool new_bw_below_dprx;
	int old_bw;
	int new_bw;
	int ret;

	/* Sample the link BW before the tunnel state (and hence the BW) can change. */
	old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);

	ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		return ret;
	}

	/*
	 * Nothing more to do if the tunnel state didn't change (ret == 0,
	 * presumably - matching the drm_dp_tunnel_update_state() contract)
	 * or BW changes aren't tracked with BW allocation mode disabled.
	 */
	if (ret == 0 ||
	    !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
		return 0;

	/* The tunnel state changed; re-read the sink capabilities. */
	intel_dp_update_sink_caps(intel_dp);

	new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);

	/* Suppress the notification if the mode list can't change due to bw. */
	if (old_bw_below_dprx == new_bw_below_dprx &&
	    !new_bw_below_dprx)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    encoder->base.base.id, encoder->base.name,
		    kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));

	return 1;
}
114
/*
 * Allocate the BW for a tunnel on a DP connector/port if the connector/port
 * was already active when detecting the tunnel. The allocated BW must be
 * freed by the next atomic modeset, storing the BW in the
 * intel_atomic_state::inherited_dp_tunnels, and calling
 * intel_dp_tunnel_atomic_free_bw().
 */
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc;
	int tunnel_bw = 0;
	int err;

	/* Sum up the BW requirement of each active stream (pipe) on the tunnel. */
	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int stream_bw = intel_dp_config_required_rate(crtc_state);

		tunnel_bw += stream_bw;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    crtc->base.base.id, crtc->base.name,
			    crtc->pipe,
			    kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw));
	}

	err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
	if (err) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(err));

		return err;
	}

	/* Pick up any state/BW change caused by the allocation itself. */
	return update_tunnel_state(intel_dp);
}
159
allocate_initial_tunnel_bw(struct intel_dp * intel_dp,struct drm_modeset_acquire_ctx * ctx)160 static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
161 struct drm_modeset_acquire_ctx *ctx)
162 {
163 u8 pipe_mask;
164 int err;
165
166 err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
167 if (err)
168 return err;
169
170 return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
171 }
172
/*
 * Detect a tunnel on @intel_dp, enabling its BW allocation mode and
 * allocating the BW for any already active streams on it.
 *
 * Returns 0 on success (including when the tunnel is kept with BW
 * allocation disabled), a negative error code otherwise.
 */
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_dp_tunnel *tunnel;
	int ret;

	tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
				      &intel_dp->aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);

	intel_dp->tunnel = tunnel;

	ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (ret) {
		/* BW allocation mode not supported: keep the tunnel silently. */
		if (ret == -EOPNOTSUPP)
			return 0;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		/* Keep the tunnel with BWA disabled */
		return 0;
	}

	/* Reserve the BW for any streams that are already active on the port. */
	ret = allocate_initial_tunnel_bw(intel_dp, ctx);
	if (ret < 0)
		intel_dp_tunnel_destroy(intel_dp);

	return ret;
}
208
/**
 * intel_dp_tunnel_detect - Detect a DP tunnel on a port
 * @intel_dp: DP port object
 * @ctx: lock context acquired by the connector detection handler
 *
 * Detect a DP tunnel on the @intel_dp port, enabling the BW allocation mode
 * on it if supported and allocating the BW required on an already active port.
 * The BW allocated this way must be freed by the next atomic modeset calling
 * intel_dp_tunnel_atomic_free_bw().
 *
 * If @intel_dp has already a tunnel detected on it, update the tunnel's state
 * wrt. its support for BW allocation mode and the available BW via the
 * tunnel. If the tunnel's state change requires this - for instance the
 * tunnel's group ID has changed - the tunnel will be dropped and recreated.
 *
 * Return 0 in case of success - after any tunnel detected and added to
 * @intel_dp - 1 in case the BW on an already existing tunnel has changed in a
 * way that requires notifying user space.
 */
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	/* Tunnels aren't relevant for eDP (internal) panels. */
	if (intel_dp_is_edp(intel_dp))
		return 0;

	if (intel_dp->tunnel) {
		ret = update_tunnel_state(intel_dp);
		if (ret >= 0)
			return ret;

		/* Try to recreate the tunnel after an update error. */
		intel_dp_tunnel_destroy(intel_dp);
	}

	return detect_new_tunnel(intel_dp, ctx);
}
246
/**
 * intel_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation support on a tunnel
 * @intel_dp: DP port object
 *
 * Query whether a DP tunnel is connected on @intel_dp and the tunnel supports
 * the BW allocation mode.
 *
 * Returns %true if the BW allocation mode is supported on @intel_dp.
 */
bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
{
	/* Handles intel_dp->tunnel == NULL as well (no tunnel detected). */
	return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel);
}
260
/**
 * intel_dp_tunnel_suspend - Suspend a DP tunnel connected on a port
 * @intel_dp: DP port object
 *
 * Suspend a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	/* Nothing to do if BW allocation mode isn't active on the tunnel. */
	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	/* Checked by intel_dp_tunnel_resume() to re-enable BW allocation. */
	intel_dp->tunnel_suspended = true;
}
286
/**
 * intel_dp_tunnel_resume - Resume a DP tunnel connected on a port
 * @intel_dp: DP port object
 * @crtc_state: CRTC state
 * @dpcd_updated: the DPCD DPRX capabilities got updated during resume
 *
 * Resume a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool dpcd_updated)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 pipe_mask;
	int err = 0;

	/* Only resume what intel_dp_tunnel_suspend() actually suspended. */
	if (!intel_dp->tunnel_suspended)
		return;

	intel_dp->tunnel_suspended = false;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	/*
	 * The TBT Connection Manager requires the GFX driver to read out
	 * the sink's DPRX caps to be able to service any BW requests later.
	 * During resume overriding the caps in @intel_dp cached before
	 * suspend must be avoided, so do here only a dummy read, unless the
	 * capabilities were updated already during resume.
	 */
	if (!dpcd_updated) {
		/* The read goes to a local buffer, leaving the cached caps intact. */
		err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err) {
			drm_dp_tunnel_set_io_error(intel_dp->tunnel);
			goto out_err;
		}
	}

	err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (err)
		goto out_err;

	/* Re-allocate the BW for the stream active on this port, if any. */
	pipe_mask = 0;
	if (crtc_state) {
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

		/* TODO: Add support for MST */
		pipe_mask |= BIT(crtc->pipe);
	}

	err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
	if (err < 0)
		goto out_err;

	return;

out_err:
	/* The tunnel is kept; the next detection cycle will drop/recreate it. */
	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    ERR_PTR(err));
}
359
360 static struct drm_dp_tunnel *
get_inherited_tunnel(struct intel_atomic_state * state,struct intel_crtc * crtc)361 get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
362 {
363 if (!state->inherited_dp_tunnels)
364 return NULL;
365
366 return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
367 }
368
/*
 * Track @tunnel as inherited for @crtc's pipe in @state, lazily allocating
 * the tracking structure. Returns 0 on success or -ENOMEM.
 */
static int
add_inherited_tunnel(struct intel_atomic_state *state,
		     struct drm_dp_tunnel *tunnel,
		     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_dp_tunnel *old_tunnel;

	/* The pipe should have at most one inherited tunnel. */
	old_tunnel = get_inherited_tunnel(state, crtc);
	if (old_tunnel) {
		drm_WARN_ON(display->drm, old_tunnel != tunnel);
		return 0;
	}

	/* Allocated on first use; freed by the inherited-state cleanup. */
	if (!state->inherited_dp_tunnels) {
		state->inherited_dp_tunnels = kzalloc_obj(*state->inherited_dp_tunnels);
		if (!state->inherited_dp_tunnels)
			return -ENOMEM;
	}

	drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);

	return 0;
}
393
static int check_inherited_tunnel_state(struct intel_atomic_state *state,
					struct intel_dp *intel_dp,
					const struct intel_digital_connector_state *old_conn_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->base.connector);
	struct intel_crtc *old_crtc;
	const struct intel_crtc_state *old_crtc_state;

	/*
	 * If a BWA tunnel gets detected only after the corresponding
	 * connector got enabled already without a BWA tunnel, or a different
	 * BWA tunnel (which was removed meanwhile) the old CRTC state won't
	 * contain the state of the current tunnel. This tunnel still has a
	 * reserved BW, which needs to be released, add the state for such
	 * inherited tunnels separately only to this atomic state.
	 */
	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	/* The connector wasn't enabled before, so no BW could be inherited. */
	if (!old_conn_state->base.crtc)
		return 0;

	old_crtc = to_intel_crtc(old_conn_state->base.crtc);
	old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);

	/* An active old CRTC already tracking this tunnel needs no extra state. */
	if (!old_crtc_state->hw.active ||
	    old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    old_crtc->base.base.id, old_crtc->base.name,
		    intel_dp->tunnel);

	return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
}
436
437 /**
438 * intel_dp_tunnel_atomic_cleanup_inherited_state - Free any inherited DP tunnel state
439 * @state: Atomic state
440 *
441 * Free the inherited DP tunnel state in @state.
442 */
intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state * state)443 void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
444 {
445 struct intel_display *display = to_intel_display(state);
446 enum pipe pipe;
447
448 if (!state->inherited_dp_tunnels)
449 return;
450
451 for_each_pipe(display, pipe)
452 if (state->inherited_dp_tunnels->ref[pipe].tunnel)
453 drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);
454
455 kfree(state->inherited_dp_tunnels);
456 state->inherited_dp_tunnels = NULL;
457 }
458
/*
 * Add the atomic state of all pipes (streams) in @tunnel's group to @state,
 * so that a BW change on one stream can be accounted across the whole group.
 */
static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
						  struct drm_dp_tunnel *tunnel)
{
	struct intel_display *display = to_intel_display(state);
	u32 pipe_mask;
	int err;

	err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
							      tunnel, &pipe_mask);
	if (err)
		return err;

	/* Stream IDs are pipes here, so the mask must fit the pipe range. */
	drm_WARN_ON(display->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));

	return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}
475
476 /**
477 * intel_dp_tunnel_atomic_add_state_for_crtc - Add CRTC specific DP tunnel state
478 * @state: Atomic state
479 * @crtc: CRTC to add the tunnel state for
480 *
481 * Add the DP tunnel state for @crtc if the CRTC (aka DP tunnel stream) is enabled
482 * via a DP tunnel.
483 *
484 * Return 0 in case of success, a negative error code otherwise.
485 */
intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state * state,struct intel_crtc * crtc)486 int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
487 struct intel_crtc *crtc)
488 {
489 const struct intel_crtc_state *new_crtc_state =
490 intel_atomic_get_new_crtc_state(state, crtc);
491 const struct drm_dp_tunnel_state *tunnel_state;
492 struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;
493
494 if (!tunnel)
495 return 0;
496
497 tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
498 if (IS_ERR(tunnel_state))
499 return PTR_ERR(tunnel_state);
500
501 return 0;
502 }
503
/*
 * Add the tunnel group state for @crtc to @state, if @crtc is driven via a
 * tunnel. @intel_dp/@connector are used for debug logging only.
 */
static int check_group_state(struct intel_atomic_state *state,
			     struct intel_dp *intel_dp,
			     struct intel_connector *connector,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* No tunnel on this CRTC, nothing to add. */
	if (!crtc_state->dp_tunnel_ref.tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc_state->dp_tunnel_ref.tunnel);

	return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel);
}
527
528 /**
529 * intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel specific state
530 * @state: Atomic state
531 * @intel_dp: DP port object
532 * @connector: connector using @intel_dp
533 *
534 * Check and add the DP tunnel atomic state for @intel_dp/@connector to
535 * @state, if there is a DP tunnel detected on @intel_dp with BW allocation
536 * mode enabled on it, or if @intel_dp/@connector was previously enabled via a
537 * DP tunnel.
538 *
539 * Returns 0 in case of success, or a negative error code otherwise.
540 */
intel_dp_tunnel_atomic_check_state(struct intel_atomic_state * state,struct intel_dp * intel_dp,struct intel_connector * connector)541 int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
542 struct intel_dp *intel_dp,
543 struct intel_connector *connector)
544 {
545 const struct intel_digital_connector_state *old_conn_state =
546 intel_atomic_get_old_connector_state(state, connector);
547 const struct intel_digital_connector_state *new_conn_state =
548 intel_atomic_get_new_connector_state(state, connector);
549 int err;
550
551 if (old_conn_state->base.crtc) {
552 err = check_group_state(state, intel_dp, connector,
553 to_intel_crtc(old_conn_state->base.crtc));
554 if (err)
555 return err;
556 }
557
558 if (new_conn_state->base.crtc &&
559 new_conn_state->base.crtc != old_conn_state->base.crtc) {
560 err = check_group_state(state, intel_dp, connector,
561 to_intel_crtc(new_conn_state->base.crtc));
562 if (err)
563 return err;
564 }
565
566 return check_inherited_tunnel_state(state, intel_dp, old_conn_state);
567 }
568
/**
 * intel_dp_tunnel_atomic_compute_stream_bw - Compute the BW required by a DP tunnel stream
 * @state: Atomic state
 * @intel_dp: DP object
 * @connector: connector using @intel_dp
 * @crtc_state: state of CRTC of the given DP tunnel stream
 *
 * Compute the required BW of CRTC (aka DP tunnel stream), storing this BW to
 * the DP tunnel state containing the stream in @state. Before re-calculating a
 * BW requirement in the crtc_state state the old BW requirement computed by this
 * function must be cleared by calling intel_dp_tunnel_atomic_clear_stream_bw().
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
					     struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int required_rate = intel_dp_config_required_rate(crtc_state);
	int ret;

	/* No BW tracking without BW allocation mode. */
	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc->pipe,
		    kbytes_to_mbits(required_rate));

	ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
						 crtc->pipe, required_rate);
	if (ret < 0)
		return ret;

	/* Hold a tunnel reference for the CRTC; dropped by clear_stream_bw(). */
	drm_dp_tunnel_ref_get(intel_dp->tunnel,
			      &crtc_state->dp_tunnel_ref);

	return 0;
}
616
617 /**
618 * intel_dp_tunnel_atomic_clear_stream_bw - Clear any DP tunnel stream BW requirement
619 * @state: Atomic state
620 * @crtc_state: state of CRTC of the given DP tunnel stream
621 *
622 * Clear any DP tunnel stream BW requirement set by
623 * intel_dp_tunnel_atomic_compute_stream_bw().
624 */
intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state * state,struct intel_crtc_state * crtc_state)625 void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
626 struct intel_crtc_state *crtc_state)
627 {
628 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
629
630 if (!crtc_state->dp_tunnel_ref.tunnel)
631 return;
632
633 drm_dp_tunnel_atomic_set_stream_bw(&state->base,
634 crtc_state->dp_tunnel_ref.tunnel,
635 crtc->pipe, 0);
636 drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
637 }
638
639 /**
640 * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state
641 * @state: intel atomic state
642 * @limits: link BW limits
643 *
644 * Check the link configuration for all DP tunnels in @state. If the
645 * configuration is invalid @limits will be updated if possible to
646 * reduce the total BW, after which the configuration for all CRTCs in
647 * @state must be recomputed with the updated @limits.
648 *
649 * Returns:
650 * - 0 if the configuration is valid
651 * - %-EAGAIN, if the configuration is invalid and @limits got updated
652 * with fallback values with which the configuration of all CRTCs in
653 * @state must be recomputed
654 * - Other negative error, if the configuration is invalid without a
655 * fallback possibility, or the check failed for another reason
656 */
intel_dp_tunnel_atomic_check_link(struct intel_atomic_state * state,struct intel_link_bw_limits * limits)657 int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
658 struct intel_link_bw_limits *limits)
659 {
660 u32 failed_stream_mask;
661 int err;
662
663 err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
664 &failed_stream_mask);
665 if (err != -ENOSPC)
666 return err;
667
668 err = intel_link_bw_reduce_bpp(state, limits,
669 failed_stream_mask, "DP tunnel link BW");
670
671 return err ? : -EAGAIN;
672 }
673
/*
 * Release the part of each modeset tunnel's allocated BW that the new state
 * no longer needs. Done before atomic_increase_bw() so the freed-up BW can
 * be re-used by other tunnels/streams in the same commit.
 */
static void atomic_decrease_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_dp_tunnel_state *new_tunnel_state;
		struct drm_dp_tunnel *tunnel;
		int old_bw;
		int new_bw;

		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		/* Prefer a tunnel inherited outside of an atomic commit. */
		tunnel = get_inherited_tunnel(state, crtc);
		if (!tunnel)
			tunnel = old_crtc_state->dp_tunnel_ref.tunnel;

		if (!tunnel)
			continue;

		old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);

		new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
		new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);

		/* Only shrink here; increases are handled by atomic_increase_bw(). */
		if (new_bw >= old_bw)
			continue;

		drm_dp_tunnel_alloc_bw(tunnel, new_bw);
	}
}
708
/*
 * Queue a modeset retry after a failed BW allocation, but only if the sink
 * is still connected - a disconnect makes the failure moot.
 */
static void queue_retry_work(struct intel_atomic_state *state,
			     struct drm_dp_tunnel *tunnel,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder;

	encoder = intel_get_crtc_new_encoder(state, crtc_state);

	if (!intel_digital_port_connected(encoder))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
		    drm_dp_tunnel_name(tunnel),
		    encoder->base.base.id,
		    encoder->base.name);

	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
}
729
atomic_increase_bw(struct intel_atomic_state * state)730 static void atomic_increase_bw(struct intel_atomic_state *state)
731 {
732 struct intel_crtc *crtc;
733 const struct intel_crtc_state *crtc_state;
734 int i;
735
736 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
737 struct drm_dp_tunnel_state *tunnel_state;
738 struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
739 int bw;
740
741 if (!intel_crtc_needs_modeset(crtc_state))
742 continue;
743
744 if (!tunnel)
745 continue;
746
747 tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
748
749 bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);
750
751 if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
752 queue_retry_work(state, tunnel, crtc_state);
753 }
754 }
755
/**
 * intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
 * @state: Atomic state
 *
 * Allocate the required BW for all tunnels in @state.
 */
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
{
	/* Shrink allocations first, so the freed BW is available for increases. */
	atomic_decrease_bw(state);
	atomic_increase_bw(state);
}
767
768 /**
769 * intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
770 * @display: display device
771 *
772 * Initialize the DP tunnel manager. The tunnel manager will support the
773 * detection/management of DP tunnels on all DP connectors, so the function
774 * must be called after all these connectors have been registered already.
775 *
776 * Return 0 in case of success, a negative error code otherwise.
777 */
intel_dp_tunnel_mgr_init(struct intel_display * display)778 int intel_dp_tunnel_mgr_init(struct intel_display *display)
779 {
780 struct drm_dp_tunnel_mgr *tunnel_mgr;
781 struct drm_connector_list_iter connector_list_iter;
782 struct intel_connector *connector;
783 int dp_connectors = 0;
784
785 drm_connector_list_iter_begin(display->drm, &connector_list_iter);
786 for_each_intel_connector_iter(connector, &connector_list_iter) {
787 if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
788 continue;
789
790 dp_connectors++;
791 }
792 drm_connector_list_iter_end(&connector_list_iter);
793
794 tunnel_mgr = drm_dp_tunnel_mgr_create(display->drm, dp_connectors);
795 if (IS_ERR(tunnel_mgr))
796 return PTR_ERR(tunnel_mgr);
797
798 display->dp_tunnel_mgr = tunnel_mgr;
799
800 return 0;
801 }
802
/**
 * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
 * @display: display device
 *
 * Clean up the DP tunnel manager state.
 */
void intel_dp_tunnel_mgr_cleanup(struct intel_display *display)
{
	drm_dp_tunnel_mgr_destroy(display->dp_tunnel_mgr);
	/* Clear the pointer to catch any use after cleanup. */
	display->dp_tunnel_mgr = NULL;
}
814