xref: /linux/drivers/gpu/drm/display/drm_dp_tunnel.c (revision db5d28c0bfe566908719bec8e25443aabecbb802)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <linux/ref_tracker.h>
7 #include <linux/types.h>
8 
9 #include <drm/drm_atomic_state_helper.h>
10 
11 #include <drm/drm_atomic.h>
12 #include <drm/drm_print.h>
13 #include <drm/display/drm_dp.h>
14 #include <drm/display/drm_dp_helper.h>
15 #include <drm/display/drm_dp_tunnel.h>
16 
17 #define to_group(__private_obj) \
18 	container_of(__private_obj, struct drm_dp_tunnel_group, base)
19 
20 #define to_group_state(__private_state) \
21 	container_of(__private_state, struct drm_dp_tunnel_group_state, base)
22 
23 #define is_dp_tunnel_private_obj(__obj) \
24 	((__obj)->funcs == &tunnel_group_funcs)
25 
26 #define for_each_new_group_in_state(__state, __new_group_state, __i) \
27 	for ((__i) = 0; \
28 	     (__i) < (__state)->num_private_objs; \
29 	     (__i)++) \
30 		for_each_if ((__state)->private_objs[__i].ptr && \
31 			     is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
32 			     ((__new_group_state) = \
33 				to_group_state((__state)->private_objs[__i].new_state), 1))
34 
35 #define for_each_old_group_in_state(__state, __old_group_state, __i) \
36 	for ((__i) = 0; \
37 	     (__i) < (__state)->num_private_objs; \
38 	     (__i)++) \
39 		for_each_if ((__state)->private_objs[__i].ptr && \
40 			     is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
41 			     ((__old_group_state) = \
42 				to_group_state((__state)->private_objs[__i].old_state), 1))
43 
44 #define for_each_tunnel_in_group(__group, __tunnel) \
45 	list_for_each_entry(__tunnel, &(__group)->tunnels, node)
46 
47 #define for_each_tunnel_state(__group_state, __tunnel_state) \
48 	list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node)
49 
50 #define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \
51 	list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \
52 				 &(__group_state)->tunnel_states, node)
53 
54 #define kbytes_to_mbits(__kbytes) \
55 	DIV_ROUND_UP((__kbytes) * 8, 1000)
56 
57 #define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw))
58 
59 #define __tun_prn(__tunnel, __level, __type, __fmt, ...) \
60 	drm_##__level##__type((__tunnel)->group->mgr->dev, \
61 			      "[DPTUN %s][%s] " __fmt, \
62 			      drm_dp_tunnel_name(__tunnel), \
63 			      (__tunnel)->aux->name, ## \
64 			      __VA_ARGS__)
65 
66 #define tun_dbg(__tunnel, __fmt, ...) \
67 	__tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__)
68 
69 #define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \
70 	if (__err) \
71 		__tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \
72 			  ## __VA_ARGS__, ERR_PTR(__err)); \
73 	else \
74 		__tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \
75 			  ## __VA_ARGS__); \
76 } while (0)
77 
78 #define tun_dbg_atomic(__tunnel, __fmt, ...) \
79 	__tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__)
80 
81 #define tun_grp_dbg(__group, __fmt, ...) \
82 	drm_dbg_kms((__group)->mgr->dev, \
83 		    "[DPTUN %s] " __fmt, \
84 		    drm_dp_tunnel_group_name(__group), ## \
85 		    __VA_ARGS__)
86 
87 #define DP_TUNNELING_BASE DP_TUNNELING_OUI
88 
89 #define __DPTUN_REG_RANGE(__start, __size) \
90 	GENMASK_ULL((__start) + (__size) - 1, (__start))
91 
92 #define DPTUN_REG_RANGE(__addr, __size) \
93 	__DPTUN_REG_RANGE((__addr) - DP_TUNNELING_BASE, (__size))
94 
95 #define DPTUN_REG(__addr) DPTUN_REG_RANGE(__addr, 1)
96 
97 #define DPTUN_INFO_REG_MASK ( \
98 	DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \
99 	DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \
100 	DPTUN_REG(DP_TUNNELING_HW_REV) | \
101 	DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \
102 	DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \
103 	DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \
104 	DPTUN_REG(DP_IN_ADAPTER_INFO) | \
105 	DPTUN_REG(DP_USB4_DRIVER_ID) | \
106 	DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \
107 	DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \
108 	DPTUN_REG(DP_BW_GRANULARITY) | \
109 	DPTUN_REG(DP_ESTIMATED_BW) | \
110 	DPTUN_REG(DP_ALLOCATED_BW) | \
111 	DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \
112 	DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \
113 	DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL))
114 
115 static const DECLARE_BITMAP(dptun_info_regs, 64) = {
116 	DPTUN_INFO_REG_MASK & -1UL,
117 #if BITS_PER_LONG == 32
118 	DPTUN_INFO_REG_MASK >> 32,
119 #endif
120 };
121 
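/*
 * Snapshot of the tunneling DPCD registers of interest: only the registers
 * selected by DPTUN_INFO_REG_MASK are stored, packed back to back in buf[].
 * tunnel_reg_ptr() below maps a DPCD address to its slot in buf[] by counting
 * the set bits preceding it in dptun_info_regs.
 */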
122 struct drm_dp_tunnel_regs {
123 	u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)];
124 };
125 
126 struct drm_dp_tunnel_group;
127 
128 struct drm_dp_tunnel {
129 	struct drm_dp_tunnel_group *group;
130 
131 	struct list_head node;
132 
133 	struct kref kref;
134 	struct ref_tracker *tracker;
135 	struct drm_dp_aux *aux;
136 	char name[8];
137 
138 	int bw_granularity;
139 	int estimated_bw;
140 	int allocated_bw;
141 
142 	int max_dprx_rate;
143 	u8 max_dprx_lane_count;
144 
145 	u8 adapter_id;
146 
147 	bool bw_alloc_supported:1;
148 	bool bw_alloc_enabled:1;
149 	bool has_io_error:1;
150 	bool destroyed:1;
151 };
152 
153 struct drm_dp_tunnel_group_state;
154 
155 struct drm_dp_tunnel_state {
156 	struct drm_dp_tunnel_group_state *group_state;
157 
158 	struct drm_dp_tunnel_ref tunnel_ref;
159 
160 	struct list_head node;
161 
162 	u32 stream_mask;
163 	int *stream_bw;
164 };
165 
166 struct drm_dp_tunnel_group_state {
167 	struct drm_private_state base;
168 
169 	struct list_head tunnel_states;
170 };
171 
172 struct drm_dp_tunnel_group {
173 	struct drm_private_obj base;
174 	struct drm_dp_tunnel_mgr *mgr;
175 
176 	struct list_head tunnels;
177 
178 	/* available BW including the allocated_bw of all tunnels in the group */
179 	int available_bw;
180 
181 	u8 drv_group_id;
182 	char name[8];
183 
184 	bool active:1;
185 };
186 
187 struct drm_dp_tunnel_mgr {
188 	struct drm_device *dev;
189 
190 	int group_count;
191 	struct drm_dp_tunnel_group *groups;
192 	wait_queue_head_t bw_req_queue;
193 
194 #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
195 	struct ref_tracker_dir ref_tracker;
196 #endif
197 };
198 
199 /*
200  * The following helpers provide a way to read out the tunneling DPCD
201  * registers with a minimal amount of AUX transfers (1 transfer per contiguous
202  * range, as permitted by the 16 byte per transfer AUX limit), not accessing
203  * other registers to avoid any read side-effects.
204  */
205 static int next_reg_area(int *offset)
206 {
207 	*offset = find_next_bit(dptun_info_regs, 64, *offset);
208 
209 	return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset;
210 }
211 
212 #define tunnel_reg_ptr(__regs, __address) ({ \
213 	WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \
214 	&(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \
215 })
216 
217 static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs)
218 {
219 	int offset = 0;
220 	int len;
221 
222 	while ((len = next_reg_area(&offset))) {
223 		int address = DP_TUNNELING_BASE + offset;
224 
225 		if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
226 			return -EIO;
227 
228 		offset += len;
229 	}
230 
231 	return 0;
232 }
233 
234 static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)
235 {
236 	return *tunnel_reg_ptr(regs, address);
237 }
238 
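/*
 * Minimal usage sketch for the helpers above (illustrative only; the aux
 * pointer is assumed to come from the caller's port code). read_tunnel_regs()
 * returns -EIO on any AUX failure:
 *
 *	struct drm_dp_tunnel_regs regs;
 *	u8 caps;
 *
 *	if (read_tunnel_regs(aux, &regs) < 0)
 *		return -EIO;
 *
 *	caps = tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES);
 */
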
239 static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)
240 {
241 	u8 drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK;
242 	u8 group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK;
243 
244 	if (!group_id)
245 		return 0;
246 
247 	return (drv_id << DP_GROUP_ID_BITS) | group_id;
248 }
249 
250 /* Return granularity in kB/s units */
251 static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)
252 {
253 	int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;
254 
255 	if (gr > 2)
256 		return -1;
257 
258 	return (250000 << gr) / 8;
259 }
260 
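/*
 * Worked example for the conversion above: the granularity field values
 * gr = 0, 1, 2 correspond to 0.25, 0.5 and 1 Gb/s and yield
 * (250000 << gr) / 8 = 31250, 62500 and 125000 kB/s respectively.
 */
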
261 static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)
262 {
263 	u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE);
264 
265 	return drm_dp_bw_code_to_link_rate(bw_code);
266 }
267 
268 static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs)
269 {
270 	return tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) &
271 	       DP_TUNNELING_MAX_LANE_COUNT_MASK;
272 }
273 
274 static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs)
275 {
276 	u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT;
277 
278 	if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask)
279 		return false;
280 
281 	return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) &
282 	       DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT;
283 }
284 
285 static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs)
286 {
287 	return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) &
288 	       DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE;
289 }
290 
291 static u8 tunnel_group_drv_id(u8 drv_group_id)
292 {
293 	return drv_group_id >> DP_GROUP_ID_BITS;
294 }
295 
296 static u8 tunnel_group_id(u8 drv_group_id)
297 {
298 	return drv_group_id & DP_GROUP_ID_MASK;
299 }
300 
301 const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
302 {
303 	return tunnel->name;
304 }
305 EXPORT_SYMBOL(drm_dp_tunnel_name);
306 
307 static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group)
308 {
309 	return group->name;
310 }
311 
312 static struct drm_dp_tunnel_group *
313 lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)
314 {
315 	struct drm_dp_tunnel_group *group = NULL;
316 	int i;
317 
318 	for (i = 0; i < mgr->group_count; i++) {
319 		/*
320 		 * A tunnel group with 0 group ID shouldn't have more than one
321 		 * tunnel.
322 		 */
323 		if (tunnel_group_id(drv_group_id) &&
324 		    mgr->groups[i].drv_group_id == drv_group_id)
325 			return &mgr->groups[i];
326 
327 		if (!group && !mgr->groups[i].active)
328 			group = &mgr->groups[i];
329 	}
330 
331 	if (!group) {
332 		drm_dbg_kms(mgr->dev,
333 			    "DPTUN: Can't allocate more tunnel groups\n");
334 		return NULL;
335 	}
336 
337 	group->drv_group_id = drv_group_id;
338 	group->active = true;
339 
340 	/*
341 	 * The group name format here and elsewhere: Driver-ID:Group-ID:*
342 	 * (* standing for all DP-Adapters/tunnels in the group).
343 	 */
344 	snprintf(group->name, sizeof(group->name), "%d:%d:*",
345 		 tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
346 		 tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));
347 
348 	return group;
349 }
350 
351 static void free_group(struct drm_dp_tunnel_group *group)
352 {
353 	struct drm_dp_tunnel_mgr *mgr = group->mgr;
354 
355 	if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels)))
356 		return;
357 
358 	group->drv_group_id = 0;
359 	group->available_bw = -1;
360 	group->active = false;
361 }
362 
363 static struct drm_dp_tunnel *
364 tunnel_get(struct drm_dp_tunnel *tunnel)
365 {
366 	kref_get(&tunnel->kref);
367 
368 	return tunnel;
369 }
370 
371 static void free_tunnel(struct kref *kref)
372 {
373 	struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
374 	struct drm_dp_tunnel_group *group = tunnel->group;
375 
376 	list_del(&tunnel->node);
377 	if (list_empty(&group->tunnels))
378 		free_group(group);
379 
380 	kfree(tunnel);
381 }
382 
383 static void tunnel_put(struct drm_dp_tunnel *tunnel)
384 {
385 	kref_put(&tunnel->kref, free_tunnel);
386 }
387 
388 #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
389 static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
390 			     struct ref_tracker **tracker)
391 {
392 	ref_tracker_alloc(&tunnel->group->mgr->ref_tracker,
393 			  tracker, GFP_KERNEL);
394 }
395 
396 static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
397 			       struct ref_tracker **tracker)
398 {
399 	ref_tracker_free(&tunnel->group->mgr->ref_tracker,
400 			 tracker);
401 }
402 #else
403 static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
404 			     struct ref_tracker **tracker)
405 {
406 }
407 
408 static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
409 			       struct ref_tracker **tracker)
410 {
411 }
412 #endif
413 
414 /**
415  * drm_dp_tunnel_get - Get a reference for a DP tunnel
416  * @tunnel: Tunnel object
417  * @tracker: Debug tracker for the reference
418  *
419  * Get a reference for @tunnel, along with a debug tracker to help locate
420  * the source of issues like a reference leak or a double reference put.
421  *
422  * The reference must be dropped after use by calling drm_dp_tunnel_put(),
423  * passing @tunnel and the *@tracker returned from here.
424  *
425  * Returns @tunnel - as a convenience - along with *@tracker.
426  */
427 struct drm_dp_tunnel *
428 drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
429 		  struct ref_tracker **tracker)
430 {
431 	track_tunnel_ref(tunnel, tracker);
432 
433 	return tunnel_get(tunnel);
434 }
435 EXPORT_SYMBOL(drm_dp_tunnel_get);
436 
437 /**
438  * drm_dp_tunnel_put - Put a reference for a DP tunnel
439  * @tunnel: Tunnel object
440  * @tracker: Debug tracker for the reference
441  *
442  * Put a reference for @tunnel along with its debug *@tracker, which
443  * was obtained with drm_dp_tunnel_get().
444  */
445 void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel,
446 		       struct ref_tracker **tracker)
447 {
448 	untrack_tunnel_ref(tunnel, tracker);
449 
450 	tunnel_put(tunnel);
451 }
452 EXPORT_SYMBOL(drm_dp_tunnel_put);
453 
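/*
 * Typical reference handling sketch (illustrative only; where the tunnel
 * pointer and the tracker are stored is up to the driver). The
 * drm_dp_tunnel_ref_get()/drm_dp_tunnel_ref_put() helpers used further below
 * bundle the tunnel pointer and the tracker for the same purpose:
 *
 *	struct ref_tracker *tracker;
 *
 *	drm_dp_tunnel_get(tunnel, &tracker);
 *	... use the tunnel ...
 *	drm_dp_tunnel_put(tunnel, &tracker);
 */
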
454 static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr,
455 				u8 drv_group_id,
456 				struct drm_dp_tunnel *tunnel)
457 {
458 	struct drm_dp_tunnel_group *group;
459 
460 	group = lookup_or_alloc_group(mgr, drv_group_id);
461 	if (!group)
462 		return false;
463 
464 	tunnel->group = group;
465 	list_add(&tunnel->node, &group->tunnels);
466 
467 	return true;
468 }
469 
470 static struct drm_dp_tunnel *
471 create_tunnel(struct drm_dp_tunnel_mgr *mgr,
472 	      struct drm_dp_aux *aux,
473 	      const struct drm_dp_tunnel_regs *regs)
474 {
475 	u8 drv_group_id = tunnel_reg_drv_group_id(regs);
476 	struct drm_dp_tunnel *tunnel;
477 
478 	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
479 	if (!tunnel)
480 		return NULL;
481 
482 	INIT_LIST_HEAD(&tunnel->node);
483 
484 	kref_init(&tunnel->kref);
485 
486 	tunnel->aux = aux;
487 
488 	tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK;
489 
490 	snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d",
491 		 tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
492 		 tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1),
493 		 tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1));
494 
495 	tunnel->bw_granularity = tunnel_reg_bw_granularity(regs);
496 	tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) *
497 			       tunnel->bw_granularity;
498 	/*
499 	 * An initial allocated BW of 0 indicates an undefined state: the
500 	 * actual allocation is determined by the TBT CM, usually following a
501 	 * legacy allocation policy (based on the max DPRX caps). From the
502 	 * driver's POV the state becomes defined only after the first
503 	 * allocation request.
504 	 */
505 	if (!tunnel->allocated_bw)
506 		tunnel->allocated_bw = -1;
507 
508 	tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs);
509 	tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs);
510 
511 	if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) {
512 		kfree(tunnel);
513 
514 		return NULL;
515 	}
516 
517 	track_tunnel_ref(tunnel, &tunnel->tracker);
518 
519 	return tunnel;
520 }
521 
522 static void destroy_tunnel(struct drm_dp_tunnel *tunnel)
523 {
524 	untrack_tunnel_ref(tunnel, &tunnel->tracker);
525 	tunnel_put(tunnel);
526 }
527 
528 /**
529  * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
530  * @tunnel: Tunnel object
531  *
532  * Set the IO error flag for @tunnel. Drivers can call this function upon
533  * detecting a failure that affects the tunnel functionality, for instance
534  * after a DP AUX transfer failure on the port @tunnel is connected to.
535  *
536  * This disables further management of @tunnel, including any related
537  * AUX accesses for tunneling DPCD registers, returning an error to the
538  * initiators of these accesses. The driver is expected to drop this
539  * tunnel and, optionally, recreate it.
540  */
541 void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
542 {
543 	tunnel->has_io_error = true;
544 }
545 EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);
546 
547 #define SKIP_DPRX_CAPS_CHECK		BIT(0)
548 #define ALLOW_ALLOCATED_BW_CHANGE	BIT(1)
549 static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
550 				  const struct drm_dp_tunnel_regs *regs,
551 				  unsigned int flags)
552 {
553 	u8 drv_group_id = tunnel_reg_drv_group_id(regs);
554 	bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
555 	bool ret = true;
556 
557 	if (!tunnel_reg_bw_alloc_supported(regs)) {
558 		if (tunnel_group_id(drv_group_id)) {
559 			drm_dbg_kms(mgr->dev,
560 				    "DPTUN: A non-zero group ID is only allowed with BWA support\n");
561 			ret = false;
562 		}
563 
564 		if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
565 			drm_dbg_kms(mgr->dev,
566 				    "DPTUN: BW is allocated without BWA support\n");
567 			ret = false;
568 		}
569 
570 		return ret;
571 	}
572 
573 	if (!tunnel_group_id(drv_group_id)) {
574 		drm_dbg_kms(mgr->dev,
575 			    "DPTUN: BWA support requires a non-zero group ID\n");
576 		ret = false;
577 	}
578 
579 	if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
580 		drm_dbg_kms(mgr->dev,
581 			    "DPTUN: Invalid DPRX lane count: %d\n",
582 			    tunnel_reg_max_dprx_lane_count(regs));
583 
584 		ret = false;
585 	}
586 
587 	if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
588 		drm_dbg_kms(mgr->dev,
589 			    "DPTUN: DPRX rate is 0\n");
590 
591 		ret = false;
592 	}
593 
594 	if (tunnel_reg_bw_granularity(regs) < 0) {
595 		drm_dbg_kms(mgr->dev,
596 			    "DPTUN: Invalid BW granularity\n");
597 
598 		ret = false;
599 	}
600 
601 	if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
602 		drm_dbg_kms(mgr->dev,
603 			    "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
604 			    DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
605 					 tunnel_reg_bw_granularity(regs)),
606 			    DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
607 					 tunnel_reg_bw_granularity(regs)));
608 
609 		ret = false;
610 	}
611 
612 	return ret;
613 }
614 
615 static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel)
616 {
617 	return max(tunnel->allocated_bw, 0);
618 }
619 
620 static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
621 					  const struct drm_dp_tunnel_regs *regs,
622 					  unsigned int flags)
623 {
624 	u8 new_drv_group_id = tunnel_reg_drv_group_id(regs);
625 	bool ret = true;
626 
627 	if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
628 		tun_dbg(tunnel,
629 			"BW alloc support has changed %s -> %s\n",
630 			str_yes_no(tunnel->bw_alloc_supported),
631 			str_yes_no(tunnel_reg_bw_alloc_supported(regs)));
632 
633 		ret = false;
634 	}
635 
636 	if (tunnel->group->drv_group_id != new_drv_group_id) {
637 		tun_dbg(tunnel,
638 			"Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
639 			tunnel_group_drv_id(tunnel->group->drv_group_id),
640 			tunnel_group_id(tunnel->group->drv_group_id),
641 			tunnel_group_drv_id(new_drv_group_id),
642 			tunnel_group_id(new_drv_group_id));
643 
644 		ret = false;
645 	}
646 
647 	if (!tunnel->bw_alloc_supported)
648 		return ret;
649 
650 	if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
651 		tun_dbg(tunnel,
652 			"BW granularity has changed: %d -> %d Mb/s\n",
653 			DPTUN_BW_ARG(tunnel->bw_granularity),
654 			DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));
655 
656 		ret = false;
657 	}
658 
659 	/*
660 	 * On some devices at least the BW alloc mode enabled status is always
661 	 * reported as 0, so skip checking that here.
662 	 */
663 
664 	if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
665 	    tunnel_allocated_bw(tunnel) !=
666 	    tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
667 		tun_dbg(tunnel,
668 			"Allocated BW has changed: %d -> %d Mb/s\n",
669 			DPTUN_BW_ARG(tunnel->allocated_bw),
670 			DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity));
671 
672 		ret = false;
673 	}
674 
675 	return ret;
676 }
677 
678 static int
679 read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
680 			    struct drm_dp_tunnel_regs *regs,
681 			    unsigned int flags)
682 {
683 	int err;
684 
685 	err = read_tunnel_regs(tunnel->aux, regs);
686 	if (err < 0) {
687 		drm_dp_tunnel_set_io_error(tunnel);
688 
689 		return err;
690 	}
691 
692 	if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags))
693 		return -EINVAL;
694 
695 	if (!tunnel_info_changes_are_valid(tunnel, regs, flags))
696 		return -EINVAL;
697 
698 	return 0;
699 }
700 
701 static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs)
702 {
703 	bool changed = false;
704 
705 	if (tunnel_reg_max_dprx_rate(regs) != tunnel->max_dprx_rate) {
706 		tunnel->max_dprx_rate = tunnel_reg_max_dprx_rate(regs);
707 		changed = true;
708 	}
709 
710 	if (tunnel_reg_max_dprx_lane_count(regs) != tunnel->max_dprx_lane_count) {
711 		tunnel->max_dprx_lane_count = tunnel_reg_max_dprx_lane_count(regs);
712 		changed = true;
713 	}
714 
715 	return changed;
716 }
717 
718 static int dev_id_len(const u8 *dev_id, int max_len)
719 {
720 	while (max_len && dev_id[max_len - 1] == '\0')
721 		max_len--;
722 
723 	return max_len;
724 }
725 
726 static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel)
727 {
728 	int max_dprx_bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate,
729 						    tunnel->max_dprx_lane_count);
730 
731 	/*
732 	 * A BW request of roundup(max_dprx_bw, tunnel->bw_granularity) results in
733 	 * an allocation of max_dprx_bw. A BW request above this rounded-up
734 	 * value will fail.
735 	 */
736 	return min(roundup(max_dprx_bw, tunnel->bw_granularity),
737 		   MAX_DP_REQUEST_BW * tunnel->bw_granularity);
738 }
739 
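/*
 * Numeric example for the rounding above (hypothetical values): with
 * max_dprx_bw = 100000 kB/s and bw_granularity = 31250 kB/s the function
 * returns roundup(100000, 31250) = 125000 kB/s; per the comment above, a
 * request of 125000 kB/s results in an allocation of max_dprx_bw, while any
 * request above that value fails.
 */
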
740 static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel)
741 {
742 	return min(get_max_dprx_bw(tunnel), tunnel->group->available_bw);
743 }
744 
745 /**
746  * drm_dp_tunnel_detect - Detect DP tunnel on the link
747  * @mgr: Tunnel manager
748  * @aux: DP AUX on which the tunnel will be detected
749  *
750  * Detect if there is any DP tunnel on the link and add it to the tunnel
751  * group's tunnel list.
752  *
753  * Returns a pointer to a tunnel on success, or an ERR_PTR() error on
754  * failure.
755  */
756 struct drm_dp_tunnel *
757 drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
758 		     struct drm_dp_aux *aux)
759 {
760 	struct drm_dp_tunnel_regs regs;
761 	struct drm_dp_tunnel *tunnel;
762 	int err;
763 
764 	err = read_tunnel_regs(aux, &regs);
765 	if (err)
766 		return ERR_PTR(err);
767 
768 	if (!(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
769 	      DP_TUNNELING_SUPPORT))
770 		return ERR_PTR(-ENODEV);
771 
772 	/* The DPRX caps are valid only after enabling BW alloc mode. */
773 	if (!tunnel_regs_are_valid(mgr, &regs, SKIP_DPRX_CAPS_CHECK))
774 		return ERR_PTR(-EINVAL);
775 
776 	tunnel = create_tunnel(mgr, aux, &regs);
777 	if (!tunnel)
778 		return ERR_PTR(-ENOMEM);
779 
780 	tun_dbg(tunnel,
781 		"OUI:%*phD DevID:%*pE Rev-HW:%d.%d SW:%d.%d PR-Sup:%s BWA-Sup:%s BWA-En:%s\n",
782 		DP_TUNNELING_OUI_BYTES,
783 			tunnel_reg_ptr(&regs, DP_TUNNELING_OUI),
784 		dev_id_len(tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID), DP_TUNNELING_DEV_ID_BYTES),
785 			tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID),
786 		(tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MAJOR_MASK) >>
787 			DP_TUNNELING_HW_REV_MAJOR_SHIFT,
788 		(tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MINOR_MASK) >>
789 			DP_TUNNELING_HW_REV_MINOR_SHIFT,
790 		tunnel_reg(&regs, DP_TUNNELING_SW_REV_MAJOR),
791 		tunnel_reg(&regs, DP_TUNNELING_SW_REV_MINOR),
792 		str_yes_no(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
793 			   DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT),
794 		str_yes_no(tunnel->bw_alloc_supported),
795 		str_yes_no(tunnel->bw_alloc_enabled));
796 
797 	return tunnel;
798 }
799 EXPORT_SYMBOL(drm_dp_tunnel_detect);
800 
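/*
 * Illustrative detection flow on the driver side (sketch only; mgr, aux and
 * the surrounding error handling are the driver's own).
 * drm_dp_tunnel_detect() returns ERR_PTR(-ENODEV) if there is no tunnel on
 * the given AUX and drm_dp_tunnel_enable_bw_alloc() returns -EOPNOTSUPP if
 * the tunnel doesn't support BW allocation mode:
 *
 *	tunnel = drm_dp_tunnel_detect(mgr, aux);
 *	if (IS_ERR(tunnel))
 *		return PTR_ERR(tunnel);
 *
 *	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
 *	if (err && err != -EOPNOTSUPP)
 *		return err;
 */
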
801 /**
802  * drm_dp_tunnel_destroy - Destroy tunnel object
803  * @tunnel: Tunnel object
804  *
805  * Remove the tunnel from the tunnel topology and destroy it.
806  *
807  * Returns 0 on success, -ENODEV if the tunnel has been destroyed already.
808  */
809 int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
810 {
811 	if (!tunnel)
812 		return 0;
813 
814 	if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed))
815 		return -ENODEV;
816 
817 	tun_dbg(tunnel, "destroying\n");
818 
819 	tunnel->destroyed = true;
820 	destroy_tunnel(tunnel);
821 
822 	return 0;
823 }
824 EXPORT_SYMBOL(drm_dp_tunnel_destroy);
825 
826 static int check_tunnel(const struct drm_dp_tunnel *tunnel)
827 {
828 	if (tunnel->destroyed)
829 		return -ENODEV;
830 
831 	if (tunnel->has_io_error)
832 		return -EIO;
833 
834 	return 0;
835 }
836 
837 static int group_allocated_bw(struct drm_dp_tunnel_group *group)
838 {
839 	struct drm_dp_tunnel *tunnel;
840 	int group_allocated_bw = 0;
841 
842 	for_each_tunnel_in_group(group, tunnel) {
843 		if (check_tunnel(tunnel) == 0 &&
844 		    tunnel->bw_alloc_enabled)
845 			group_allocated_bw += tunnel_allocated_bw(tunnel);
846 	}
847 
848 	return group_allocated_bw;
849 }
850 
851 /*
852  * The estimated BW reported by the TBT Connection Manager for each tunnel in
853  * a group includes the BW already allocated for the given tunnel and the
854  * unallocated BW which is free to be used by any tunnel in the group.
855  */
856 static int group_free_bw(const struct drm_dp_tunnel *tunnel)
857 {
858 	return tunnel->estimated_bw - tunnel_allocated_bw(tunnel);
859 }
860 
861 static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel)
862 {
863 	return group_allocated_bw(tunnel->group) +
864 	       group_free_bw(tunnel);
865 }
866 
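/*
 * Worked example for the helpers above (hypothetical numbers): with tunnel A
 * having 125000 kB/s and tunnel B 62500 kB/s allocated, and B reporting an
 * estimated BW of 250000 kB/s, querying B gives group_allocated_bw() =
 * 187500, group_free_bw(B) = 250000 - 62500 = 187500, and so
 * calc_group_available_bw(B) = 375000 kB/s.
 */
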
867 static int update_group_available_bw(struct drm_dp_tunnel *tunnel,
868 				     const struct drm_dp_tunnel_regs *regs)
869 {
870 	struct drm_dp_tunnel *tunnel_iter;
871 	int group_available_bw;
872 	bool changed;
873 
874 	tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity;
875 
876 	if (calc_group_available_bw(tunnel) == tunnel->group->available_bw)
877 		return 0;
878 
879 	for_each_tunnel_in_group(tunnel->group, tunnel_iter) {
880 		int err;
881 
882 		if (tunnel_iter == tunnel)
883 			continue;
884 
885 		if (check_tunnel(tunnel_iter) != 0 ||
886 		    !tunnel_iter->bw_alloc_enabled)
887 			continue;
888 
889 		err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV);
890 		if (err) {
891 			tun_dbg(tunnel_iter,
892 				"Probe failed, assume disconnected (err %pe)\n",
893 				ERR_PTR(err));
894 			drm_dp_tunnel_set_io_error(tunnel_iter);
895 		}
896 	}
897 
898 	group_available_bw = calc_group_available_bw(tunnel);
899 
900 	tun_dbg(tunnel, "Updated group available BW: %d->%d\n",
901 		DPTUN_BW_ARG(tunnel->group->available_bw),
902 		DPTUN_BW_ARG(group_available_bw));
903 
904 	changed = tunnel->group->available_bw != group_available_bw;
905 
906 	tunnel->group->available_bw = group_available_bw;
907 
908 	return changed ? 1 : 0;
909 }
910 
911 static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
912 {
913 	u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
914 	u8 val;
915 
916 	if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
917 		goto out_err;
918 
919 	if (enable)
920 		val |= mask;
921 	else
922 		val &= ~mask;
923 
924 	if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
925 		goto out_err;
926 
927 	tunnel->bw_alloc_enabled = enable;
928 
929 	return 0;
930 
931 out_err:
932 	drm_dp_tunnel_set_io_error(tunnel);
933 
934 	return -EIO;
935 }
936 
937 /**
938  * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode
939  * @tunnel: Tunnel object
940  *
941  * Enable the DP tunnel BW allocation mode on @tunnel if it supports it.
942  *
943  * Returns 0 in case of success, negative error code otherwise.
944  */
945 int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
946 {
947 	struct drm_dp_tunnel_regs regs;
948 	int err;
949 
950 	err = check_tunnel(tunnel);
951 	if (err)
952 		return err;
953 
954 	if (!tunnel->bw_alloc_supported)
955 		return -EOPNOTSUPP;
956 
957 	if (!tunnel_group_id(tunnel->group->drv_group_id))
958 		return -EINVAL;
959 
960 	err = set_bw_alloc_mode(tunnel, true);
961 	if (err)
962 		goto out;
963 
964 	/*
965 	 * After a BWA disable/re-enable sequence the allocated BW can either
966 	 * stay at its last requested value or, for instance after system
967 	 * suspend/resume, TBT CM can reset back the allocation to the amount
968 	 * allocated in the legacy/non-BWA mode. Accordingly allow for the
969 	 * allocation to change wrt. the last SW state.
970 	 */
971 	err = read_and_verify_tunnel_regs(tunnel, &regs,
972 					  ALLOW_ALLOCATED_BW_CHANGE);
973 	if (err) {
974 		set_bw_alloc_mode(tunnel, false);
975 
976 		goto out;
977 	}
978 
979 	if (!tunnel->max_dprx_rate)
980 		update_dprx_caps(tunnel, &regs);
981 
982 	if (tunnel->group->available_bw == -1) {
983 		err = update_group_available_bw(tunnel, &regs);
984 		if (err > 0)
985 			err = 0;
986 	}
987 out:
988 	tun_dbg_stat(tunnel, err,
989 		     "Enabling BW alloc mode: DPRX:%dx%d Group alloc:%d/%d Mb/s",
990 		     tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
991 		     DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
992 		     DPTUN_BW_ARG(tunnel->group->available_bw));
993 
994 	return err;
995 }
996 EXPORT_SYMBOL(drm_dp_tunnel_enable_bw_alloc);
997 
998 /**
999  * drm_dp_tunnel_disable_bw_alloc - Disable DP tunnel BW allocation mode
1000  * @tunnel: Tunnel object
1001  *
1002  * Disable the DP tunnel BW allocation mode on @tunnel.
1003  *
1004  * Returns 0 in case of success, negative error code otherwise.
1005  */
1006 int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
1007 {
1008 	int err;
1009 
1010 	err = check_tunnel(tunnel);
1011 	if (err)
1012 		return err;
1013 
1014 	tunnel->allocated_bw = -1;
1015 
1016 	err = set_bw_alloc_mode(tunnel, false);
1017 
1018 	tun_dbg_stat(tunnel, err, "Disabling BW alloc mode");
1019 
1020 	return err;
1021 }
1022 EXPORT_SYMBOL(drm_dp_tunnel_disable_bw_alloc);
1023 
1024 /**
1025  * drm_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation mode enabled state
1026  * @tunnel: Tunnel object
1027  *
1028  * Query if the BW allocation mode is enabled for @tunnel.
1029  *
1030  * Returns %true if the BW allocation mode is enabled for @tunnel.
1031  */
1032 bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
1033 {
1034 	return tunnel && tunnel->bw_alloc_enabled;
1035 }
1036 EXPORT_SYMBOL(drm_dp_tunnel_bw_alloc_is_enabled);
1037 
1038 static int clear_bw_req_state(struct drm_dp_aux *aux)
1039 {
1040 	u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
1041 
1042 	if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
1043 		return -EIO;
1044 
1045 	return 0;
1046 }
1047 
1048 static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
1049 {
1050 	u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
1051 	u8 status_change_mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
1052 	u8 val;
1053 	int err;
1054 
1055 	if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
1056 		return -EIO;
1057 
1058 	*status_changed = val & status_change_mask;
1059 
1060 	val &= bw_req_mask;
1061 
1062 	if (!val)
1063 		return -EAGAIN;
1064 
1065 	err = clear_bw_req_state(aux);
1066 	if (err < 0)
1067 		return err;
1068 
1069 	return val == DP_BW_REQUEST_SUCCEEDED ? 0 : -ENOSPC;
1070 }
1071 
1072 static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
1073 {
1074 	struct drm_dp_tunnel_mgr *mgr = tunnel->group->mgr;
1075 	int request_bw = DIV_ROUND_UP(bw, tunnel->bw_granularity);
1076 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1077 	long timeout;
1078 	int err;
1079 
1080 	if (bw < 0) {
1081 		err = -EINVAL;
1082 		goto out;
1083 	}
1084 
1085 	if (request_bw * tunnel->bw_granularity == tunnel->allocated_bw)
1086 		return 0;
1087 
1088 	/* Atomic check should prevent the following. */
1089 	if (drm_WARN_ON(mgr->dev, request_bw > MAX_DP_REQUEST_BW)) {
1090 		err = -EINVAL;
1091 		goto out;
1092 	}
1093 
1094 	err = clear_bw_req_state(tunnel->aux);
1095 	if (err)
1096 		goto out;
1097 
1098 	if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
1099 		err = -EIO;
1100 		goto out;
1101 	}
1102 
1103 	timeout = msecs_to_jiffies(3000);
1104 	add_wait_queue(&mgr->bw_req_queue, &wait);
1105 
1106 	for (;;) {
1107 		bool status_changed;
1108 
1109 		err = bw_req_complete(tunnel->aux, &status_changed);
1110 		if (err != -EAGAIN)
1111 			break;
1112 
1113 		if (status_changed) {
1114 			struct drm_dp_tunnel_regs regs;
1115 
1116 			err = read_and_verify_tunnel_regs(tunnel, &regs,
1117 							  ALLOW_ALLOCATED_BW_CHANGE);
1118 			if (err)
1119 				break;
1120 		}
1121 
1122 		if (!timeout) {
1123 			err = -ETIMEDOUT;
1124 			break;
1125 		}
1126 
1127 		timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE, timeout);
1128 	};
1129 
1130 	remove_wait_queue(&mgr->bw_req_queue, &wait);
1131 
1132 	if (err)
1133 		goto out;
1134 
1135 	tunnel->allocated_bw = request_bw * tunnel->bw_granularity;
1136 
1137 out:
1138 	tun_dbg_stat(tunnel, err, "Allocating %d/%d Mb/s for tunnel: Group alloc:%d/%d Mb/s",
1139 		     DPTUN_BW_ARG(request_bw * tunnel->bw_granularity),
1140 		     DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
1141 		     DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
1142 		     DPTUN_BW_ARG(tunnel->group->available_bw));
1143 
1144 	if (err == -EIO)
1145 		drm_dp_tunnel_set_io_error(tunnel);
1146 
1147 	return err;
1148 }
1149 
1150 /**
1151  * drm_dp_tunnel_alloc_bw - Allocate BW for a DP tunnel
1152  * @tunnel: Tunnel object
1153  * @bw: BW in kB/s units
1154  *
1155  * Allocate @bw kB/s for @tunnel. The allocated BW must be freed after use by
1156  * calling this function again for the same tunnel with @bw set to 0.
1157  *
1158  * Returns 0 in case of success, a negative error code otherwise.
1159  */
1160 int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
1161 {
1162 	int err;
1163 
1164 	err = check_tunnel(tunnel);
1165 	if (err)
1166 		return err;
1167 
1168 	return allocate_tunnel_bw(tunnel, bw);
1169 }
1170 EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);
1171 
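/*
 * Illustrative allocation sequence (kB/s units, values hypothetical):
 *
 *	err = drm_dp_tunnel_alloc_bw(tunnel, 250000);	// request 2 Gb/s
 *	...
 *	err = drm_dp_tunnel_alloc_bw(tunnel, 0);	// release it again
 */
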
1172 /**
1173  * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
1174  * @tunnel: Tunnel object
1175  *
1176  * Get the current BW allocated for @tunnel. After the tunnel is created or
1177  * resumed and the BW allocation mode is enabled for it, the allocation
1178  * becomes determined only after the driver's first allocation request via
1179  * drm_dp_tunnel_alloc_bw().
1180  *
1181  * Return the BW allocated for the tunnel, or -1 if the allocation is
1182  * undetermined.
1183  */
1184 int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
1185 {
1186 	return tunnel->allocated_bw;
1187 }
1188 EXPORT_SYMBOL(drm_dp_tunnel_get_allocated_bw);
1189 
1190 /*
1191  * Return 0 if the status hasn't changed, 1 if the status has changed, a
1192  * negative error code in case of an I/O failure.
1193  */
1194 static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
1195 {
1196 	u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
1197 	u8 val;
1198 
1199 	if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
1200 		goto out_err;
1201 
1202 	val &= mask;
1203 
1204 	if (val) {
1205 		if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
1206 			goto out_err;
1207 
1208 		return 1;
1209 	}
1210 
1211 	if (!drm_dp_tunnel_bw_alloc_is_enabled(tunnel))
1212 		return 0;
1213 
1214 	/*
1215 	 * Check for estimated BW changes explicitly to account for lost
1216 	 * BW change notifications.
1217 	 */
1218 	if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
1219 		goto out_err;
1220 
1221 	if (val * tunnel->bw_granularity != tunnel->estimated_bw)
1222 		return 1;
1223 
1224 	return 0;
1225 
1226 out_err:
1227 	drm_dp_tunnel_set_io_error(tunnel);
1228 
1229 	return -EIO;
1230 }
1231 
1232 /**
1233  * drm_dp_tunnel_update_state - Update DP tunnel SW state with the HW state
1234  * @tunnel: Tunnel object
1235  *
1236  * Update the SW state of @tunnel with the HW state.
1237  *
1238  * Returns 0 if the state has not changed, 1 if it has changed and got updated
1239  * successfully and a negative error code otherwise.
1240  */
1241 int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
1242 {
1243 	struct drm_dp_tunnel_regs regs;
1244 	bool changed = false;
1245 	int ret;
1246 
1247 	ret = check_tunnel(tunnel);
1248 	if (ret < 0)
1249 		return ret;
1250 
1251 	ret = check_and_clear_status_change(tunnel);
1252 	if (ret < 0)
1253 		goto out;
1254 
1255 	if (!ret)
1256 		return 0;
1257 
1258 	ret = read_and_verify_tunnel_regs(tunnel, &regs, 0);
1259 	if (ret)
1260 		goto out;
1261 
1262 	if (update_dprx_caps(tunnel, &regs))
1263 		changed = true;
1264 
1265 	ret = update_group_available_bw(tunnel, &regs);
1266 	if (ret == 1)
1267 		changed = true;
1268 
1269 out:
1270 	tun_dbg_stat(tunnel, ret < 0 ? ret : 0,
1271 		     "State update: Changed:%s DPRX:%dx%d Tunnel alloc:%d/%d Group alloc:%d/%d Mb/s",
1272 		     str_yes_no(changed),
1273 		     tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
1274 		     DPTUN_BW_ARG(tunnel->allocated_bw),
1275 		     DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
1276 		     DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
1277 		     DPTUN_BW_ARG(tunnel->group->available_bw));
1278 
1279 	if (ret < 0)
1280 		return ret;
1281 
1282 	if (changed)
1283 		return 1;
1284 
1285 	return 0;
1286 }
1287 EXPORT_SYMBOL(drm_dp_tunnel_update_state);
1288 
1289 /*
1290  * drm_dp_tunnel_handle_irq - Handle DP tunnel IRQs
1291  *
1292  * Handle any pending DP tunnel IRQs, waking up waiters for a completion
1293  * event.
1294  *
1295  * Returns 1 if the state of the tunnel has changed, which requires calling
1296  * drm_dp_tunnel_update_state(), a negative error code in case of a failure,
1297  * 0 otherwise.
1298  */
1299 int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
1300 {
1301 	u8 val;
1302 
1303 	if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
1304 		return -EIO;
1305 
1306 	if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
1307 		wake_up_all(&mgr->bw_req_queue);
1308 
1309 	if (val & (DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED))
1310 		return 1;
1311 
1312 	return 0;
1313 }
1314 EXPORT_SYMBOL(drm_dp_tunnel_handle_irq);
1315 
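/*
 * Illustrative IRQ/HPD handling (sketch only; deferring the update to the
 * driver's hotplug work is an assumption, since drm_dp_tunnel_update_state()
 * performs AUX accesses):
 *
 *	ret = drm_dp_tunnel_handle_irq(mgr, aux);
 *	if (ret == 1)
 *		... schedule work that calls drm_dp_tunnel_update_state() ...
 */
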
1316 /**
1317  * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX
1318  * @tunnel: Tunnel object
1319  *
1320  * The function is used to query the maximum link rate of the DPRX connected
1321  * to @tunnel. Note that this rate will not be limited by the BW limit of the
1322  * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD
1323  * registers.
1324  *
1325  * Returns the maximum link rate in 10 kbit/s units.
1326  */
1327 int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
1328 {
1329 	return tunnel->max_dprx_rate;
1330 }
1331 EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate);
1332 
1333 /**
1334  * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX
1335  * @tunnel: Tunnel object
1336  *
1337  * The function is used to query the maximum lane count of the DPRX connected
1338  * to @tunnel. Note that this lane count will not be limited by the BW limit of
1339  * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD
1340  * registers.
1341  *
1342  * Returns the maximum lane count.
1343  */
1344 int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
1345 {
1346 	return tunnel->max_dprx_lane_count;
1347 }
1348 EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count);
1349 
1350 /**
1351  * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel
1352  * @tunnel: Tunnel object
1353  *
1354  * This function is used to query the estimated total available BW of the
1355  * tunnel. This includes the currently allocated and free BW for all the
1356  * tunnels in @tunnel's group. The available BW is valid only after the BW
1357  * allocation mode has been enabled for the tunnel and its state got updated
1358  * calling drm_dp_tunnel_update_state().
1359  *
1360  * Returns the @tunnel group's estimated total available bandwidth in kB/s
1361  * units, or -1 if the available BW isn't valid (the BW allocation mode is
1362  * not enabled or the tunnel's state hasn't been updated).
1363  */
1364 int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
1365 {
1366 	return tunnel->group->available_bw;
1367 }
1368 EXPORT_SYMBOL(drm_dp_tunnel_available_bw);
1369 
1370 static struct drm_dp_tunnel_group_state *
1371 drm_dp_tunnel_atomic_get_group_state(struct drm_atomic_state *state,
1372 				     const struct drm_dp_tunnel *tunnel)
1373 {
1374 	return (struct drm_dp_tunnel_group_state *)
1375 		drm_atomic_get_private_obj_state(state,
1376 						 &tunnel->group->base);
1377 }
1378 
1379 static struct drm_dp_tunnel_state *
1380 add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
1381 		 struct drm_dp_tunnel *tunnel)
1382 {
1383 	struct drm_dp_tunnel_state *tunnel_state;
1384 
1385 	tun_dbg_atomic(tunnel,
1386 		       "Adding state for tunnel %p to group state %p\n",
1387 		       tunnel, group_state);
1388 
1389 	tunnel_state = kzalloc(sizeof(*tunnel_state), GFP_KERNEL);
1390 	if (!tunnel_state)
1391 		return NULL;
1392 
1393 	tunnel_state->group_state = group_state;
1394 
1395 	drm_dp_tunnel_ref_get(tunnel, &tunnel_state->tunnel_ref);
1396 
1397 	INIT_LIST_HEAD(&tunnel_state->node);
1398 	list_add(&tunnel_state->node, &group_state->tunnel_states);
1399 
1400 	return tunnel_state;
1401 }
1402 
1403 static void free_tunnel_state(struct drm_dp_tunnel_state *tunnel_state)
1404 {
1405 	tun_dbg_atomic(tunnel_state->tunnel_ref.tunnel,
1406 		       "Freeing state for tunnel %p\n",
1407 		       tunnel_state->tunnel_ref.tunnel);
1408 
1409 	list_del(&tunnel_state->node);
1410 
1411 	kfree(tunnel_state->stream_bw);
1412 	drm_dp_tunnel_ref_put(&tunnel_state->tunnel_ref);
1413 
1414 	kfree(tunnel_state);
1415 }
1416 
1417 static void free_group_state(struct drm_dp_tunnel_group_state *group_state)
1418 {
1419 	struct drm_dp_tunnel_state *tunnel_state;
1420 	struct drm_dp_tunnel_state *tunnel_state_tmp;
1421 
1422 	for_each_tunnel_state_safe(group_state, tunnel_state, tunnel_state_tmp)
1423 		free_tunnel_state(tunnel_state);
1424 
1425 	kfree(group_state);
1426 }
1427 
1428 static struct drm_dp_tunnel_state *
1429 get_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
1430 		 const struct drm_dp_tunnel *tunnel)
1431 {
1432 	struct drm_dp_tunnel_state *tunnel_state;
1433 
1434 	for_each_tunnel_state(group_state, tunnel_state)
1435 		if (tunnel_state->tunnel_ref.tunnel == tunnel)
1436 			return tunnel_state;
1437 
1438 	return NULL;
1439 }
1440 
1441 static struct drm_dp_tunnel_state *
1442 get_or_add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
1443 			struct drm_dp_tunnel *tunnel)
1444 {
1445 	struct drm_dp_tunnel_state *tunnel_state;
1446 
1447 	tunnel_state = get_tunnel_state(group_state, tunnel);
1448 	if (tunnel_state)
1449 		return tunnel_state;
1450 
1451 	return add_tunnel_state(group_state, tunnel);
1452 }
1453 
1454 static struct drm_private_state *
1455 tunnel_group_duplicate_state(struct drm_private_obj *obj)
1456 {
1457 	struct drm_dp_tunnel_group_state *group_state;
1458 	struct drm_dp_tunnel_state *tunnel_state;
1459 
1460 	group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
1461 	if (!group_state)
1462 		return NULL;
1463 
1464 	INIT_LIST_HEAD(&group_state->tunnel_states);
1465 
1466 	__drm_atomic_helper_private_obj_duplicate_state(obj, &group_state->base);
1467 
1468 	for_each_tunnel_state(to_group_state(obj->state), tunnel_state) {
1469 		struct drm_dp_tunnel_state *new_tunnel_state;
1470 
1471 		new_tunnel_state = get_or_add_tunnel_state(group_state,
1472 							   tunnel_state->tunnel_ref.tunnel);
1473 		if (!new_tunnel_state)
1474 			goto out_free_state;
1475 
1476 		new_tunnel_state->stream_mask = tunnel_state->stream_mask;
1477 		new_tunnel_state->stream_bw = kmemdup(tunnel_state->stream_bw,
1478 						      sizeof(*tunnel_state->stream_bw) *
1479 							hweight32(tunnel_state->stream_mask),
1480 						      GFP_KERNEL);
1481 
1482 		if (!new_tunnel_state->stream_bw)
1483 			goto out_free_state;
1484 	}
1485 
1486 	return &group_state->base;
1487 
1488 out_free_state:
1489 	free_group_state(group_state);
1490 
1491 	return NULL;
1492 }
1493 
1494 static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state)
1495 {
1496 	free_group_state(to_group_state(state));
1497 }
1498 
1499 static const struct drm_private_state_funcs tunnel_group_funcs = {
1500 	.atomic_duplicate_state = tunnel_group_duplicate_state,
1501 	.atomic_destroy_state = tunnel_group_destroy_state,
1502 };
1503 
1504 /**
1505  * drm_dp_tunnel_atomic_get_state - get/allocate the new atomic state for a tunnel
1506  * @state: Atomic state
1507  * @tunnel: Tunnel to get the state for
1508  *
1509  * Get the new atomic state for @tunnel, duplicating it from the old tunnel
1510  * state if not yet allocated.
1511  *
1512  * Return the state or an ERR_PTR() error on failure.
1513  */
1514 struct drm_dp_tunnel_state *
1515 drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
1516 			       struct drm_dp_tunnel *tunnel)
1517 {
1518 	struct drm_dp_tunnel_group_state *group_state;
1519 	struct drm_dp_tunnel_state *tunnel_state;
1520 
1521 	group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
1522 	if (IS_ERR(group_state))
1523 		return ERR_CAST(group_state);
1524 
1525 	tunnel_state = get_or_add_tunnel_state(group_state, tunnel);
1526 	if (!tunnel_state)
1527 		return ERR_PTR(-ENOMEM);
1528 
1529 	return tunnel_state;
1530 }
1531 EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_state);
1532 
1533 /**
1534  * drm_dp_tunnel_atomic_get_old_state - get the old atomic state for a tunnel
1535  * @state: Atomic state
1536  * @tunnel: Tunnel to get the state for
1537  *
1538  * Get the old atomic state for @tunnel.
1539  *
1540  * Return the old state or NULL if the tunnel's atomic state is not in @state.
1541  */
1542 struct drm_dp_tunnel_state *
1543 drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
1544 				   const struct drm_dp_tunnel *tunnel)
1545 {
1546 	struct drm_dp_tunnel_group_state *old_group_state;
1547 	int i;
1548 
1549 	for_each_old_group_in_state(state, old_group_state, i)
1550 		if (to_group(old_group_state->base.obj) == tunnel->group)
1551 			return get_tunnel_state(old_group_state, tunnel);
1552 
1553 	return NULL;
1554 }
1555 EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_old_state);
1556 
1557 /**
1558  * drm_dp_tunnel_atomic_get_new_state - get the new atomic state for a tunnel
1559  * @state: Atomic state
1560  * @tunnel: Tunnel to get the state for
1561  *
1562  * Get the new atomic state for @tunnel.
1563  *
1564  * Return the new state or NULL if the tunnel's atomic state is not in @state.
1565  */
1566 struct drm_dp_tunnel_state *
1567 drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
1568 				   const struct drm_dp_tunnel *tunnel)
1569 {
1570 	struct drm_dp_tunnel_group_state *new_group_state;
1571 	int i;
1572 
1573 	for_each_new_group_in_state(state, new_group_state, i)
1574 		if (to_group(new_group_state->base.obj) == tunnel->group)
1575 			return get_tunnel_state(new_group_state, tunnel);
1576 
1577 	return NULL;
1578 }
1579 EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state);
1580 
1581 static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group)
1582 {
1583 	struct drm_dp_tunnel_group_state *group_state;
1584 
1585 	group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
1586 	if (!group_state)
1587 		return false;
1588 
1589 	INIT_LIST_HEAD(&group_state->tunnel_states);
1590 
1591 	group->mgr = mgr;
1592 	group->available_bw = -1;
1593 	INIT_LIST_HEAD(&group->tunnels);
1594 
1595 	drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base,
1596 				    &tunnel_group_funcs);
1597 
1598 	return true;
1599 }
1600 
1601 static void cleanup_group(struct drm_dp_tunnel_group *group)
1602 {
1603 	drm_atomic_private_obj_fini(&group->base);
1604 }
1605 
1606 #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
1607 static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
1608 {
1609 	const struct drm_dp_tunnel_state *tunnel_state;
1610 	u32 stream_mask = 0;
1611 
1612 	for_each_tunnel_state(group_state, tunnel_state) {
1613 		drm_WARN(to_group(group_state->base.obj)->mgr->dev,
1614 			 tunnel_state->stream_mask & stream_mask,
1615 			 "[DPTUN %s]: conflicting stream IDs %x (IDs in other tunnels %x)\n",
1616 			 tunnel_state->tunnel_ref.tunnel->name,
1617 			 tunnel_state->stream_mask,
1618 			 stream_mask);
1619 
1620 		stream_mask |= tunnel_state->stream_mask;
1621 	}
1622 }
1623 #else
1624 static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
1625 {
1626 }
1627 #endif
1628 
1629 static int stream_id_to_idx(u32 stream_mask, u8 stream_id)
1630 {
1631 	return hweight32(stream_mask & (BIT(stream_id) - 1));
1632 }
1633 
1634 static int resize_bw_array(struct drm_dp_tunnel_state *tunnel_state,
1635 			   unsigned long old_mask, unsigned long new_mask)
1636 {
1637 	unsigned long move_mask = old_mask & new_mask;
1638 	int *new_bws = NULL;
1639 	int id;
1640 
1641 	WARN_ON(!new_mask);
1642 
1643 	if (old_mask == new_mask)
1644 		return 0;
1645 
1646 	new_bws = kcalloc(hweight32(new_mask), sizeof(*new_bws), GFP_KERNEL);
1647 	if (!new_bws)
1648 		return -ENOMEM;
1649 
1650 	for_each_set_bit(id, &move_mask, BITS_PER_TYPE(move_mask))
1651 		new_bws[stream_id_to_idx(new_mask, id)] =
1652 			tunnel_state->stream_bw[stream_id_to_idx(old_mask, id)];
1653 
1654 	kfree(tunnel_state->stream_bw);
1655 	tunnel_state->stream_bw = new_bws;
1656 	tunnel_state->stream_mask = new_mask;
1657 
1658 	return 0;
1659 }
1660 
1661 static int set_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
1662 			 u8 stream_id, int bw)
1663 {
1664 	int err;
1665 
1666 	err = resize_bw_array(tunnel_state,
1667 			      tunnel_state->stream_mask,
1668 			      tunnel_state->stream_mask | BIT(stream_id));
1669 	if (err)
1670 		return err;
1671 
1672 	tunnel_state->stream_bw[stream_id_to_idx(tunnel_state->stream_mask, stream_id)] = bw;
1673 
1674 	return 0;
1675 }
1676 
1677 static int clear_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
1678 			   u8 stream_id)
1679 {
1680 	if (!(tunnel_state->stream_mask & ~BIT(stream_id))) {
1681 		free_tunnel_state(tunnel_state);
1682 		return 0;
1683 	}
1684 
1685 	return resize_bw_array(tunnel_state,
1686 			       tunnel_state->stream_mask,
1687 			       tunnel_state->stream_mask & ~BIT(stream_id));
1688 }

/**
 * drm_dp_tunnel_atomic_set_stream_bw - Set the BW for a DP tunnel stream
 * @state: Atomic state
 * @tunnel: DP tunnel containing the stream
 * @stream_id: Stream ID
 * @bw: BW of the stream
 *
 * Set a DP tunnel stream's required BW in the atomic state.
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
				       struct drm_dp_tunnel *tunnel,
				       u8 stream_id, int bw)
{
	struct drm_dp_tunnel_group_state *new_group_state;
	struct drm_dp_tunnel_state *tunnel_state;
	int err;

	if (drm_WARN_ON(tunnel->group->mgr->dev,
			stream_id >= BITS_PER_TYPE(tunnel_state->stream_mask)))
		return -EINVAL;

	tun_dbg(tunnel,
		"Setting %d Mb/s for stream %d\n",
		DPTUN_BW_ARG(bw), stream_id);

	new_group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
	if (IS_ERR(new_group_state))
		return PTR_ERR(new_group_state);

	if (bw == 0) {
		tunnel_state = get_tunnel_state(new_group_state, tunnel);
		if (!tunnel_state)
			return 0;

		return clear_stream_bw(tunnel_state, stream_id);
	}

	tunnel_state = get_or_add_tunnel_state(new_group_state, tunnel);
	if (drm_WARN_ON(state->dev, !tunnel_state))
		return -EINVAL;

	err = set_stream_bw(tunnel_state, stream_id, bw);
	if (err)
		return err;

	check_unique_stream_ids(new_group_state);

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_set_stream_bw);
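/*
 * Illustrative usage sketch (not part of this file): a driver would typically
 * call the above from its atomic check code once a stream's BW requirement is
 * known, using a driver-chosen stream ID such as a pipe/CRTC index. The
 * stream_id and stream_bw variables below are hypothetical driver-side
 * values:
 *
 *	err = drm_dp_tunnel_atomic_set_stream_bw(state, tunnel,
 *						 stream_id, stream_bw);
 *	if (err)
 *		return err;
 *
 * Calling it with bw == 0 removes the stream from the tunnel's atomic state
 * again; the per-tunnel state is freed once its last stream is cleared.
 */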

/**
 * drm_dp_tunnel_atomic_get_required_bw - Get the BW required by a DP tunnel
 * @tunnel_state: Atomic state of the queried tunnel
 *
 * Calculate the BW required by a tunnel, adding up the required BW of all
 * the streams in the tunnel.
 *
 * Return the total BW required by the tunnel.
 */
int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
{
	int tunnel_bw = 0;
	int i;

	if (!tunnel_state || !tunnel_state->stream_mask)
		return 0;

	for (i = 0; i < hweight32(tunnel_state->stream_mask); i++)
		tunnel_bw += tunnel_state->stream_bw[i];

	return tunnel_bw;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_required_bw);
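/*
 * Since stream_bw[] is kept compacted (one entry per bit set in stream_mask),
 * the sum above is simply the total over every stream in the tunnel: for
 * example, two streams requiring 800 and 400 yield a required tunnel BW of
 * 1200 in the same units.
 */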

/**
 * drm_dp_tunnel_atomic_get_group_streams_in_state - Get mask of stream IDs in a group
 * @state: Atomic state
 * @tunnel: Tunnel object
 * @stream_mask: Mask of streams in @tunnel's group
 *
 * Get the mask of all the stream IDs in the tunnel group of @tunnel.
 *
 * Return 0 in case of success - with the stream IDs in @stream_mask - or a
 * negative error code in case of failure.
 */
int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
						    const struct drm_dp_tunnel *tunnel,
						    u32 *stream_mask)
{
	struct drm_dp_tunnel_group_state *group_state;
	struct drm_dp_tunnel_state *tunnel_state;

	group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
	if (IS_ERR(group_state))
		return PTR_ERR(group_state);

	*stream_mask = 0;
	for_each_tunnel_state(group_state, tunnel_state)
		*stream_mask |= tunnel_state->stream_mask;

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_group_streams_in_state);
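/*
 * Illustrative sketch: if a driver maps stream IDs to pipes/CRTCs, the
 * returned mask lets it pull every display sharing the group's BW into the
 * atomic state before recomputing the BW allocation. The pipe-to-CRTC lookup
 * below is driver-specific and hypothetical:
 *
 *	u32 stream_mask;
 *	unsigned long mask;
 *	int ret, id;
 *
 *	ret = drm_dp_tunnel_atomic_get_group_streams_in_state(state, tunnel,
 *							       &stream_mask);
 *	if (ret)
 *		return ret;
 *
 *	mask = stream_mask;
 *	for_each_set_bit(id, &mask, 32) {
 *		// driver-specific: add the CRTC driving stream 'id' to state
 *	}
 */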

static int
drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state,
				    u32 *failed_stream_mask)
{
	struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj);
	struct drm_dp_tunnel_state *new_tunnel_state;
	u32 group_stream_mask = 0;
	int group_bw = 0;

	for_each_tunnel_state(new_group_state, new_tunnel_state) {
		struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel;
		int max_dprx_bw = get_max_dprx_bw(tunnel);
		int tunnel_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);

		tun_dbg(tunnel,
			"%sRequired %d/%d Mb/s total for tunnel.\n",
			tunnel_bw > max_dprx_bw ? "Not enough BW: " : "",
			DPTUN_BW_ARG(tunnel_bw),
			DPTUN_BW_ARG(max_dprx_bw));

		if (tunnel_bw > max_dprx_bw) {
			*failed_stream_mask = new_tunnel_state->stream_mask;
			return -ENOSPC;
		}

		group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity),
				max_dprx_bw);
		group_stream_mask |= new_tunnel_state->stream_mask;
	}

	tun_grp_dbg(group,
		    "%sRequired %d/%d Mb/s total for tunnel group.\n",
		    group_bw > group->available_bw ? "Not enough BW: " : "",
		    DPTUN_BW_ARG(group_bw),
		    DPTUN_BW_ARG(group->available_bw));

	if (group_bw > group->available_bw) {
		*failed_stream_mask = group_stream_mask;
		return -ENOSPC;
	}

	return 0;
}

/**
 * drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state
 * @state: Atomic state
 * @failed_stream_mask: Mask of stream IDs with a BW limit failure
 *
 * Check the required BW of each DP tunnel in @state against both the DPRX BW
 * limit of the tunnel and the BW limit of the tunnel group. Return a mask of
 * stream IDs in @failed_stream_mask if a check fails. The mask will contain
 * either all the streams in a tunnel (in case a DPRX BW limit check failed) or
 * all the streams in a tunnel group (in case a group BW limit check failed).
 *
 * Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit
 * check failed - with @failed_stream_mask containing the streams failing the
 * check - or a negative error code otherwise.
 */
int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
					  u32 *failed_stream_mask)
{
	struct drm_dp_tunnel_group_state *new_group_state;
	int i;

	for_each_new_group_in_state(state, new_group_state, i) {
		int ret;

		ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state,
							  failed_stream_mask);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws);
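/*
 * Illustrative sketch: a driver's top-level atomic check would call the above
 * after all stream BWs have been set, and on -ENOSPC use @failed_stream_mask
 * to decide which streams (or CRTCs) need their BW reduced before re-checking.
 * The error handling below is only a hypothetical outline:
 *
 *	u32 failed_stream_mask;
 *	int ret;
 *
 *	ret = drm_dp_tunnel_atomic_check_stream_bws(state, &failed_stream_mask);
 *	if (ret == -ENOSPC) {
 *		// driver-specific: lower the BW of the streams in
 *		// failed_stream_mask (e.g. via compression or reduced link
 *		// parameters) and run the check again
 *	} else if (ret) {
 *		return ret;
 *	}
 */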

static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
{
	int i;

	for (i = 0; i < mgr->group_count; i++) {
		cleanup_group(&mgr->groups[i]);
		drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels));
	}

#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
	ref_tracker_dir_exit(&mgr->ref_tracker);
#endif

	kfree(mgr->groups);
	kfree(mgr);
}

/**
 * drm_dp_tunnel_mgr_create - Create a DP tunnel manager
 * @dev: DRM device object
 * @max_group_count: Maximum number of tunnel groups
 *
 * Creates a DP tunnel manager for @dev.
 *
 * Returns a pointer to the tunnel manager if created successfully or NULL in
 * case of an error.
 */
struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
{
	struct drm_dp_tunnel_mgr *mgr;
	int i;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return NULL;

	mgr->dev = dev;
	init_waitqueue_head(&mgr->bw_req_queue);

	mgr->groups = kcalloc(max_group_count, sizeof(*mgr->groups), GFP_KERNEL);
	if (!mgr->groups) {
		kfree(mgr);

		return NULL;
	}

#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
	ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
#endif

	for (i = 0; i < max_group_count; i++) {
		if (!init_group(mgr, &mgr->groups[i])) {
			destroy_mgr(mgr);

			return NULL;
		}

		mgr->group_count++;
	}

	return mgr;
}
EXPORT_SYMBOL(drm_dp_tunnel_mgr_create);
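/*
 * Illustrative sketch: a driver creates the manager once at init time, sized
 * for the number of ports that can carry DP tunnels; dp_port_count below is a
 * hypothetical driver-side value:
 *
 *	mgr = drm_dp_tunnel_mgr_create(dev, dp_port_count);
 *	if (!mgr)
 *		return -ENOMEM;
 */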

/**
 * drm_dp_tunnel_mgr_destroy - Destroy DP tunnel manager
 * @mgr: Tunnel manager object
 *
 * Destroy the tunnel manager.
 */
void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr)
{
	destroy_mgr(mgr);
}
EXPORT_SYMBOL(drm_dp_tunnel_mgr_destroy);
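/*
 * Note that all tunnels must have been destroyed before the manager is torn
 * down: destroy_mgr() warns if any group's tunnel list is still non-empty at
 * that point.
 */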
1951