/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
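/*
 * A sideband up request (e.g. CONNECTION_STATUS_NOTIFY), saved together with
 * its header so it can be processed later from work context.
 */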
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, u8 start_slot, u8 num_slots);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch);

#define DBG_PREFIX "[dp_mst]"

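/*
 * DP_STR() builds string tables indexed by protocol value, e.g.
 * DP_STR(LINK_ADDRESS) expands to [DP_LINK_ADDRESS] = "LINK_ADDRESS".
 */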
#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

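/*
 * A RAD (Relative ADdress) stores one 4-bit downstream port number per hop
 * past the primary branch, packed two hops per byte; unpack it here so that
 * "%*phC" prints one hop per output byte.
 */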
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & GENMASK(3, 0);
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

/* sideband msg handling */
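/*
 * Bit-serial CRC helpers for the sideband message format: headers carry a
 * 4-bit CRC (generator 0x13, i.e. x^4 + x + 1, computed over all header
 * nibbles except the CRC nibble itself), and message bodies carry an 8-bit
 * CRC (generator 0xd5 with the implicit top bit) computed over whole bytes.
 */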
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
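
/* Header size: 1 byte LCT/LCR + lct/2 RAD bytes + 1 byte len/flags + 1 byte SOMT/EOMT/seqno/CRC */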
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

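/*
 * Sideband message header layout, as encoded/decoded below:
 *
 *   byte 0:          LCT[7:4] | LCR[3:0]
 *   next lct/2 bytes: RAD, two 4-bit hops packed per byte
 *   next byte:       Broadcast[7] | Path_Msg[6] | Msg_Body_Len[5:0]
 *   last byte:       SOMT[7] | EOMT[6] | Seqno[4] | header CRC[3:0]
 */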
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;
		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);
		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
		}
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 4) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d\n",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

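/* Append the data CRC after the chunk body; @msg must have room for len + 1 bytes */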
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
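/*
 * Each chunk carries its own trailing data CRC: verify it, then append only
 * the curchunk_len - 1 payload bytes to the assembled message.
 */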
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool
drm_dp_sideband_parse_query_stream_enc_status(
				struct drm_dp_sideband_msg_rx *raw,
				struct drm_dp_sideband_msg_reply_body *repmsg)
{
	struct drm_dp_query_stream_enc_status_ack_reply *reply;

	reply = &repmsg->u.enc_status;

	reply->stream_id = raw->msg[3];

	reply->reply_signed = raw->msg[2] & BIT(0);

	/*
	 * NOTE: It's my impression from reading the spec that the below parsing
	 * is correct. However I noticed while testing with an HDCP 1.4 display
	 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
	 * would expect both bits to be set. So keep the parsing following the
	 * spec, but beware reality might not match the spec (at least for some
	 * configurations).
	 */
	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);

	reply->query_capable_device_present = raw->msg[2] & BIT(5);
	reply->legacy_device_present = raw->msg[2] & BIT(6);
	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);

	reply->auth_completed = !!(raw->msg[1] & BIT(3));
	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
	reply->repeater_present = !!(raw->msg[1] & BIT(5));
	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;

	return true;
}

static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(mgr, raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true; /* since there's nothing to parse */
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool
drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
		    idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
							 struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
		   number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
			      u8 *q_id)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
	req.u.enc_status.stream_id = stream_id;
	memcpy(req.u.enc_status.client_id, q_id,
	       sizeof(req.u.enc_status.client_id));
	req.u.enc_status.stream_event = 0;
	req.u.enc_status.valid_stream_event = false;
	req.u.enc_status.stream_behavior = 0;
	req.u.enc_status.valid_stream_behavior = false;

	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a way for this, change to
		 * poll-waiting for the MST reply interrupt if we didn't receive
		 * it for 50 msec. This would cater for cases where the HPD
		 * pulse signal got lost somewhere, even though the sink raised
		 * the corresponding MST interrupt correctly. One example is the
		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
		 * filters out short pulses with a duration less than ~540 usec.
		 *
		 * The poll period is 50 msec to avoid missing an interrupt
		 * after the sink has cleared it (after a 110msec timeout
		 * since it raised the interrupt).
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
						msecs_to_jiffies(50) :
						wait_timeout);

		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		drm_dbg_kms(mgr->dev, "timed out msg send %p %d %d\n",
			    txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
						       DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
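
/*
 * A minimal sketch of the expected usage (all names here are hypothetical):
 * a driver would typically pin the port from its
 * &drm_dp_mst_topology_cbs.add_connector hook and drop the reference when
 * its connector is destroyed:
 *
 *	struct my_mst_connector {
 *		struct drm_connector base;
 *		struct drm_dp_mst_port *port;
 *	};
 *
 *	// in the add_connector callback:
 *	my_conn->port = port;
 *	drm_dp_mst_get_port_malloc(port);
 *
 *	// in the connector's .destroy hook:
 *	drm_dp_mst_put_port_malloc(my_conn->port);
 *	drm_connector_cleanup(&my_conn->base);
 *	kfree(my_conn);
 */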
1505 
1506 /**
1507  * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1508  * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1509  *
1510  * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1511  * reaches 0, the memory allocation for @port will be released and @port may
1512  * no longer be used.
1513  *
1514  * See also: drm_dp_mst_get_port_malloc()
1515  */
1516 void
1517 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1518 {
1519 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1520 	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1521 }
1522 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1523 
1524 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
1525 
1526 #define STACK_DEPTH 8
1527 
1528 static noinline void
1529 __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1530 		    struct drm_dp_mst_topology_ref_history *history,
1531 		    enum drm_dp_mst_topology_ref_type type)
1532 {
1533 	struct drm_dp_mst_topology_ref_entry *entry = NULL;
1534 	depot_stack_handle_t backtrace;
1535 	ulong stack_entries[STACK_DEPTH];
1536 	uint n;
1537 	int i;
1538 
1539 	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
1540 	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
1541 	if (!backtrace)
1542 		return;
1543 
1544 	/* Try to find an existing entry for this backtrace */
1545 	for (i = 0; i < history->len; i++) {
1546 		if (history->entries[i].backtrace == backtrace) {
1547 			entry = &history->entries[i];
1548 			break;
1549 		}
1550 	}
1551 
1552 	/* Otherwise add one */
1553 	if (!entry) {
1554 		struct drm_dp_mst_topology_ref_entry *new;
1555 		int new_len = history->len + 1;
1556 
1557 		new = krealloc(history->entries, sizeof(*new) * new_len,
1558 			       GFP_KERNEL);
1559 		if (!new)
1560 			return;
1561 
1562 		entry = &new[history->len];
1563 		history->len = new_len;
1564 		history->entries = new;
1565 
1566 		entry->backtrace = backtrace;
1567 		entry->type = type;
1568 		entry->count = 0;
1569 	}
1570 	entry->count++;
1571 	entry->ts_nsec = ktime_get_ns();
1572 }
1573 
1574 static int
1575 topology_ref_history_cmp(const void *a, const void *b)
1576 {
1577 	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
1578 
1579 	if (entry_a->ts_nsec > entry_b->ts_nsec)
1580 		return 1;
1581 	else if (entry_a->ts_nsec < entry_b->ts_nsec)
1582 		return -1;
1583 	else
1584 		return 0;
1585 }
1586 
1587 static inline const char *
1588 topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
1589 {
1590 	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
1591 		return "get";
1592 	else
1593 		return "put";
1594 }
1595 
1596 static void
1597 __dump_topology_ref_history(struct drm_device *drm,
1598 			    struct drm_dp_mst_topology_ref_history *history,
1599 			    void *ptr, const char *type_str)
1600 {
1601 	struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, DBG_PREFIX);
1602 	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1603 	int i;
1604 
1605 	if (!buf)
1606 		return;
1607 
1608 	if (!history->len)
1609 		goto out;
1610 
1611 	/* First, sort the list so that it goes from oldest to newest
1612 	 * reference entry
1613 	 */
1614 	sort(history->entries, history->len, sizeof(*history->entries),
1615 	     topology_ref_history_cmp, NULL);
1616 
1617 	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
1618 		   type_str, ptr);
1619 
1620 	for (i = 0; i < history->len; i++) {
1621 		const struct drm_dp_mst_topology_ref_entry *entry =
1622 			&history->entries[i];
1623 		u64 ts_nsec = entry->ts_nsec;
1624 		u32 rem_nsec = do_div(ts_nsec, 1000000000);
1625 
1626 		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);
1627 
1628 		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
1629 			   entry->count,
1630 			   topology_ref_type_to_str(entry->type),
1631 			   ts_nsec, rem_nsec / 1000, buf);
1632 	}
1633 
1634 	/* Now free the history, since this is the only time we expose it */
1635 	kfree(history->entries);
1636 out:
1637 	kfree(buf);
1638 }
1639 
1640 static __always_inline void
1641 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
1642 {
1643 	__dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history,
1644 				    mstb, "MSTB");
1645 }
1646 
1647 static __always_inline void
1648 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
1649 {
1650 	__dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history,
1651 				    port, "Port");
1652 }
1653 
1654 static __always_inline void
1655 save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
1656 		       enum drm_dp_mst_topology_ref_type type)
1657 {
1658 	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1659 }
1660 
1661 static __always_inline void
1662 save_port_topology_ref(struct drm_dp_mst_port *port,
1663 		       enum drm_dp_mst_topology_ref_type type)
1664 {
1665 	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
1666 }
1667 
1668 static inline void
1669 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1670 {
1671 	mutex_lock(&mgr->topology_ref_history_lock);
1672 }
1673 
1674 static inline void
1675 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1676 {
1677 	mutex_unlock(&mgr->topology_ref_history_lock);
1678 }
1679 #else
1680 static inline void
1681 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1682 static inline void
1683 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1684 static inline void
1685 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
1686 static inline void
1687 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
1688 #define save_mstb_topology_ref(mstb, type)
1689 #define save_port_topology_ref(port, type)
1690 #endif
1691 
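/**
 * drm_atomic_get_mst_payload_state() - get the MST payload state for a port
 * @state: global atomic state
 * @port: the MST port to get the payload state of
 *
 * Returns: the payload in @state for @port, or NULL if none exists
 */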
1692 struct drm_dp_mst_atomic_payload *
1693 drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
1694 				 struct drm_dp_mst_port *port)
1695 {
1696 	struct drm_dp_mst_atomic_payload *payload;
1697 
1698 	list_for_each_entry(payload, &state->payloads, next)
1699 		if (payload->port == port)
1700 			return payload;
1701 
1702 	return NULL;
1703 }
1704 EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
1705 
1706 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1707 {
1708 	struct drm_dp_mst_branch *mstb =
1709 		container_of(kref, struct drm_dp_mst_branch, topology_kref);
1710 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1711 
1712 	drm_dp_mst_dump_mstb_topology_history(mstb);
1713 
1714 	INIT_LIST_HEAD(&mstb->destroy_next);
1715 
1716 	/*
	 * This can get called under mgr->lock, so we need to perform the
1718 	 * actual destruction of the mstb in another worker
1719 	 */
1720 	mutex_lock(&mgr->delayed_destroy_lock);
1721 	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1722 	mutex_unlock(&mgr->delayed_destroy_lock);
1723 	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1724 }
1725 
1726 /**
1727  * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1728  * branch device unless it's zero
1729  * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1730  *
1731  * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
1733  * reached 0). Holding a topology reference implies that a malloc reference
1734  * will be held to @mstb as long as the user holds the topology reference.
1735  *
1736  * Care should be taken to ensure that the user has at least one malloc
1737  * reference to @mstb. If you already have a topology reference to @mstb, you
1738  * should use drm_dp_mst_topology_get_mstb() instead.
1739  *
1740  * See also:
1741  * drm_dp_mst_topology_get_mstb()
1742  * drm_dp_mst_topology_put_mstb()
1743  *
1744  * Returns:
1745  * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
1747  */
1748 static int __must_check
1749 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1750 {
1751 	int ret;
1752 
1753 	topology_ref_history_lock(mstb->mgr);
1754 	ret = kref_get_unless_zero(&mstb->topology_kref);
1755 	if (ret) {
1756 		drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1757 		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1758 	}
1759 
1760 	topology_ref_history_unlock(mstb->mgr);
1761 
1762 	return ret;
1763 }
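
/*
 * A minimal usage sketch (illustrative only, not an actual call site in
 * this file): callers holding only a malloc reference are expected to
 * follow the try-get/put pattern, with the mstb guaranteed to stay in the
 * topology between the two calls:
 *
 *	if (drm_dp_mst_topology_try_get_mstb(mstb)) {
 *		probe_something_on(mstb);
 *		drm_dp_mst_topology_put_mstb(mstb);
 *	}
 *
 * where probe_something_on() stands in for arbitrary driver work.
 */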
1764 
1765 /**
1766  * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1767  * branch device
1768  * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1769  *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
1771  * not it's already reached 0. This is only valid to use in scenarios where
1772  * you are already guaranteed to have at least one active topology reference
1773  * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1774  *
1775  * See also:
1776  * drm_dp_mst_topology_try_get_mstb()
1777  * drm_dp_mst_topology_put_mstb()
1778  */
1779 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1780 {
1781 	topology_ref_history_lock(mstb->mgr);
1782 
1783 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1784 	WARN_ON(kref_read(&mstb->topology_kref) == 0);
1785 	kref_get(&mstb->topology_kref);
1786 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1787 
1788 	topology_ref_history_unlock(mstb->mgr);
1789 }
1790 
1791 /**
1792  * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1793  * device
1794  * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1795  *
1796  * Releases a topology reference from @mstb by decrementing
1797  * &drm_dp_mst_branch.topology_kref.
1798  *
1799  * See also:
1800  * drm_dp_mst_topology_try_get_mstb()
1801  * drm_dp_mst_topology_get_mstb()
1802  */
1803 static void
1804 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1805 {
1806 	topology_ref_history_lock(mstb->mgr);
1807 
1808 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
1809 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1810 
1811 	topology_ref_history_unlock(mstb->mgr);
1812 	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1813 }
1814 
1815 static void drm_dp_destroy_port(struct kref *kref)
1816 {
1817 	struct drm_dp_mst_port *port =
1818 		container_of(kref, struct drm_dp_mst_port, topology_kref);
1819 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1820 
1821 	drm_dp_mst_dump_port_topology_history(port);
1822 
1823 	/* There's nothing that needs locking to destroy an input port yet */
1824 	if (port->input) {
1825 		drm_dp_mst_put_port_malloc(port);
1826 		return;
1827 	}
1828 
1829 	drm_edid_free(port->cached_edid);
1830 
1831 	/*
1832 	 * we can't destroy the connector here, as we might be holding the
1833 	 * mode_config.mutex from an EDID retrieval
1834 	 */
1835 	mutex_lock(&mgr->delayed_destroy_lock);
1836 	list_add(&port->next, &mgr->destroy_port_list);
1837 	mutex_unlock(&mgr->delayed_destroy_lock);
1838 	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1839 }
1840 
1841 /**
1842  * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1843  * port unless it's zero
1844  * @port: &struct drm_dp_mst_port to increment the topology refcount of
1845  *
1846  * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
1848  * 0). Holding a topology reference implies that a malloc reference will be
1849  * held to @port as long as the user holds the topology reference.
1850  *
1851  * Care should be taken to ensure that the user has at least one malloc
1852  * reference to @port. If you already have a topology reference to @port, you
1853  * should use drm_dp_mst_topology_get_port() instead.
1854  *
1855  * See also:
1856  * drm_dp_mst_topology_get_port()
1857  * drm_dp_mst_topology_put_port()
1858  *
1859  * Returns:
1860  * * 1: A topology reference was grabbed successfully
1861  * * 0: @port is no longer in the topology, no reference was grabbed
1862  */
1863 static int __must_check
1864 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1865 {
1866 	int ret;
1867 
1868 	topology_ref_history_lock(port->mgr);
1869 	ret = kref_get_unless_zero(&port->topology_kref);
1870 	if (ret) {
1871 		drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
1872 		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1873 	}
1874 
1875 	topology_ref_history_unlock(port->mgr);
1876 	return ret;
1877 }
1878 
1879 /**
1880  * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1881  * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1882  *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or
1884  * not it's already reached 0. This is only valid to use in scenarios where
1885  * you are already guaranteed to have at least one active topology reference
1886  * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1887  *
1888  * See also:
1889  * drm_dp_mst_topology_try_get_port()
1890  * drm_dp_mst_topology_put_port()
1891  */
1892 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1893 {
1894 	topology_ref_history_lock(port->mgr);
1895 
1896 	WARN_ON(kref_read(&port->topology_kref) == 0);
1897 	kref_get(&port->topology_kref);
1898 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
1899 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1900 
1901 	topology_ref_history_unlock(port->mgr);
1902 }
1903 
1904 /**
1905  * drm_dp_mst_topology_put_port() - release a topology reference to a port
1906  * @port: The &struct drm_dp_mst_port to release the topology reference from
1907  *
1908  * Releases a topology reference from @port by decrementing
1909  * &drm_dp_mst_port.topology_kref.
1910  *
1911  * See also:
1912  * drm_dp_mst_topology_try_get_port()
1913  * drm_dp_mst_topology_get_port()
1914  */
1915 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1916 {
1917 	topology_ref_history_lock(port->mgr);
1918 
1919 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
1920 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1921 
1922 	topology_ref_history_unlock(port->mgr);
1923 	kref_put(&port->topology_kref, drm_dp_destroy_port);
1924 }
1925 
1926 static struct drm_dp_mst_branch *
1927 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1928 					      struct drm_dp_mst_branch *to_find)
1929 {
1930 	struct drm_dp_mst_port *port;
1931 	struct drm_dp_mst_branch *rmstb;
1932 
1933 	if (to_find == mstb)
1934 		return mstb;
1935 
1936 	list_for_each_entry(port, &mstb->ports, next) {
1937 		if (port->mstb) {
1938 			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1939 			    port->mstb, to_find);
1940 			if (rmstb)
1941 				return rmstb;
1942 		}
1943 	}
1944 	return NULL;
1945 }
1946 
1947 static struct drm_dp_mst_branch *
1948 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1949 				       struct drm_dp_mst_branch *mstb)
1950 {
1951 	struct drm_dp_mst_branch *rmstb = NULL;
1952 
1953 	mutex_lock(&mgr->lock);
1954 	if (mgr->mst_primary) {
1955 		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1956 		    mgr->mst_primary, mstb);
1957 
1958 		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1959 			rmstb = NULL;
1960 	}
1961 	mutex_unlock(&mgr->lock);
1962 	return rmstb;
1963 }
1964 
1965 static struct drm_dp_mst_port *
1966 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1967 					      struct drm_dp_mst_port *to_find)
1968 {
1969 	struct drm_dp_mst_port *port, *mport;
1970 
1971 	list_for_each_entry(port, &mstb->ports, next) {
1972 		if (port == to_find)
1973 			return port;
1974 
1975 		if (port->mstb) {
1976 			mport = drm_dp_mst_topology_get_port_validated_locked(
1977 			    port->mstb, to_find);
1978 			if (mport)
1979 				return mport;
1980 		}
1981 	}
1982 	return NULL;
1983 }
1984 
1985 static struct drm_dp_mst_port *
1986 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1987 				       struct drm_dp_mst_port *port)
1988 {
1989 	struct drm_dp_mst_port *rport = NULL;
1990 
1991 	mutex_lock(&mgr->lock);
1992 	if (mgr->mst_primary) {
1993 		rport = drm_dp_mst_topology_get_port_validated_locked(
1994 		    mgr->mst_primary, port);
1995 
1996 		if (rport && !drm_dp_mst_topology_try_get_port(rport))
1997 			rport = NULL;
1998 	}
1999 	mutex_unlock(&mgr->lock);
2000 	return rport;
2001 }
2002 
2003 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
2004 {
2005 	struct drm_dp_mst_port *port;
2006 	int ret;
2007 
2008 	list_for_each_entry(port, &mstb->ports, next) {
2009 		if (port->port_num == port_num) {
2010 			ret = drm_dp_mst_topology_try_get_port(port);
2011 			return ret ? port : NULL;
2012 		}
2013 	}
2014 
2015 	return NULL;
2016 }
2017 
/*
 * Calculate a new RAD for this MST branch device.
 * If the parent has an LCT of 2, it has 1 nibble of RAD;
 * if the parent has an LCT of 3, it has 2 nibbles of RAD, and so on.
 */
2023 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
2024 				 u8 *rad)
2025 {
2026 	int parent_lct = port->parent->lct;
2027 	int shift = 4;
2028 	int idx = (parent_lct - 1) / 2;
2029 
2030 	if (parent_lct > 1) {
2031 		memcpy(rad, port->parent->rad, idx + 1);
2032 		shift = (parent_lct % 2) ? 4 : 0;
	} else {
		rad[0] = 0;
	}
2035 
2036 	rad[idx] |= port->port_num << shift;
2037 	return parent_lct + 1;
2038 }
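
/*
 * Worked example (illustrative): for a port with port_num 8 whose parent
 * branch has an LCT of 2 and rad[0] = 0x10, idx is 0 and shift is 0, so
 * rad[0] becomes 0x10 | 8 = 0x18 and the function returns the child LCT
 * of 3.
 */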
2039 
2040 static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
2041 {
2042 	switch (pdt) {
2043 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
2044 	case DP_PEER_DEVICE_SST_SINK:
2045 		return true;
2046 	case DP_PEER_DEVICE_MST_BRANCHING:
		/* An MST branch device without MCS is an SST branch device */
2048 		if (!mcs)
2049 			return true;
2050 
2051 		return false;
2052 	}
2053 	return true;
2054 }
2055 
2056 static int
2057 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
2058 		    bool new_mcs)
2059 {
2060 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2061 	struct drm_dp_mst_branch *mstb;
2062 	u8 rad[8], lct;
2063 	int ret = 0;
2064 
2065 	if (port->pdt == new_pdt && port->mcs == new_mcs)
2066 		return 0;
2067 
2068 	/* Teardown the old pdt, if there is one */
2069 	if (port->pdt != DP_PEER_DEVICE_NONE) {
2070 		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2071 			/*
2072 			 * If the new PDT would also have an i2c bus,
2073 			 * don't bother with reregistering it
2074 			 */
2075 			if (new_pdt != DP_PEER_DEVICE_NONE &&
2076 			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
2077 				port->pdt = new_pdt;
2078 				port->mcs = new_mcs;
2079 				return 0;
2080 			}
2081 
2082 			/* remove i2c over sideband */
2083 			drm_dp_mst_unregister_i2c_bus(port);
2084 		} else {
2085 			mutex_lock(&mgr->lock);
2086 			drm_dp_mst_topology_put_mstb(port->mstb);
2087 			port->mstb = NULL;
2088 			mutex_unlock(&mgr->lock);
2089 		}
2090 	}
2091 
2092 	port->pdt = new_pdt;
2093 	port->mcs = new_mcs;
2094 
2095 	if (port->pdt != DP_PEER_DEVICE_NONE) {
2096 		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2097 			/* add i2c over sideband */
2098 			ret = drm_dp_mst_register_i2c_bus(port);
2099 		} else {
2100 			lct = drm_dp_calculate_rad(port, rad);
2101 			mstb = drm_dp_add_mst_branch_device(lct, rad);
2102 			if (!mstb) {
2103 				ret = -ENOMEM;
2104 				drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
2105 				goto out;
2106 			}
2107 
2108 			mutex_lock(&mgr->lock);
2109 			port->mstb = mstb;
2110 			mstb->mgr = port->mgr;
2111 			mstb->port_parent = port;
2112 
2113 			/*
2114 			 * Make sure this port's memory allocation stays
2115 			 * around until its child MSTB releases it
2116 			 */
2117 			drm_dp_mst_get_port_malloc(port);
2118 			mutex_unlock(&mgr->lock);
2119 
2120 			/* And make sure we send a link address for this */
2121 			ret = 1;
2122 		}
2123 	}
2124 
2125 out:
2126 	if (ret < 0)
2127 		port->pdt = DP_PEER_DEVICE_NONE;
2128 	return ret;
2129 }
2130 
2131 /**
2132  * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2133  * @aux: Fake sideband AUX CH
2134  * @offset: address of the (first) register to read
2135  * @buffer: buffer to store the register values
2136  * @size: number of bytes in @buffer
2137  *
2138  * Performs the same functionality for remote devices via
2139  * sideband messaging as drm_dp_dpcd_read() does for local
2140  * devices via actual AUX CH.
2141  *
2142  * Return: Number of bytes read, or negative error code on failure.
2143  */
2144 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2145 			     unsigned int offset, void *buffer, size_t size)
2146 {
2147 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2148 						    aux);
2149 
2150 	return drm_dp_send_dpcd_read(port->mgr, port,
2151 				     offset, size, buffer);
2152 }
2153 
2154 /**
2155  * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2156  * @aux: Fake sideband AUX CH
2157  * @offset: address of the (first) register to write
2158  * @buffer: buffer containing the values to write
2159  * @size: number of bytes in @buffer
2160  *
2161  * Performs the same functionality for remote devices via
2162  * sideband messaging as drm_dp_dpcd_write() does for local
2163  * devices via actual AUX CH.
2164  *
2165  * Return: number of bytes written on success, negative error code on failure.
2166  */
2167 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2168 			      unsigned int offset, void *buffer, size_t size)
2169 {
2170 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2171 						    aux);
2172 
2173 	return drm_dp_send_dpcd_write(port->mgr, port,
2174 				      offset, size, buffer);
2175 }
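
/*
 * A minimal sketch (hypothetical driver code): since the two helpers above
 * back &drm_dp_mst_port.aux, drivers normally reach them through the
 * generic DPCD accessors rather than calling them directly:
 *
 *	u8 dpcd_rev;
 *
 *	if (drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev) == 1)
 *		handle_remote_dpcd_rev(dpcd_rev);
 *
 * where handle_remote_dpcd_rev() stands in for driver-specific handling.
 */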
2176 
2177 static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2178 {
2179 	int ret = 0;
2180 
2181 	memcpy(mstb->guid, guid, 16);
2182 
2183 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2184 		if (mstb->port_parent) {
2185 			ret = drm_dp_send_dpcd_write(mstb->mgr,
2186 						     mstb->port_parent,
2187 						     DP_GUID, 16, mstb->guid);
2188 		} else {
2189 			ret = drm_dp_dpcd_write(mstb->mgr->aux,
2190 						DP_GUID, mstb->guid, 16);
2191 		}
2192 	}
2193 
2194 	if (ret < 16 && ret > 0)
2195 		return -EPROTO;
2196 
2197 	return ret == 16 ? 0 : ret;
2198 }
2199 
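/*
 * Build the connector path property for a port, e.g. a port with pnum 8
 * below a branch at LCT 2 (RAD nibble 1) on a manager with conn_base_id
 * 36 yields "mst:36-1-8".
 */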
2200 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2201 				int pnum,
2202 				char *proppath,
2203 				size_t proppath_size)
2204 {
2205 	int i;
2206 	char temp[8];
2207 
2208 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2209 	for (i = 0; i < (mstb->lct - 1); i++) {
2210 		int shift = (i % 2) ? 0 : 4;
2211 		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2212 
2213 		snprintf(temp, sizeof(temp), "-%d", port_num);
2214 		strlcat(proppath, temp, proppath_size);
2215 	}
2216 	snprintf(temp, sizeof(temp), "-%d", pnum);
2217 	strlcat(proppath, temp, proppath_size);
2218 }
2219 
2220 /**
2221  * drm_dp_mst_connector_late_register() - Late MST connector registration
2222  * @connector: The MST connector
2223  * @port: The MST port for this connector
2224  *
2225  * Helper to register the remote aux device for this MST port. Drivers should
2226  * call this from their mst connector's late_register hook to enable MST aux
2227  * devices.
2228  *
2229  * Return: 0 on success, negative error code on failure.
2230  */
2231 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2232 				       struct drm_dp_mst_port *port)
2233 {
2234 	drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
2235 		    port->aux.name, connector->kdev->kobj.name);
2236 
2237 	port->aux.dev = connector->kdev;
2238 	return drm_dp_aux_register_devnode(&port->aux);
2239 }
2240 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2241 
2242 /**
2243  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2244  * @connector: The MST connector
2245  * @port: The MST port for this connector
2246  *
2247  * Helper to unregister the remote aux device for this MST port, registered by
2248  * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2249  * connector's early_unregister hook.
2250  */
2251 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2252 					   struct drm_dp_mst_port *port)
2253 {
2254 	drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
2255 		    port->aux.name, connector->kdev->kobj.name);
2256 	drm_dp_aux_unregister_devnode(&port->aux);
2257 }
2258 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
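
/*
 * A minimal sketch (hypothetical driver code; my_mst_connector and
 * to_my_mst_connector() are assumed driver types) of wiring the two
 * helpers above into a connector's hooks:
 *
 *	static int my_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void my_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 */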
2259 
2260 static void
2261 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2262 			      struct drm_dp_mst_port *port)
2263 {
2264 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2265 	char proppath[255];
2266 	int ret;
2267 
2268 	build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2269 	port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2270 	if (!port->connector) {
2271 		ret = -ENOMEM;
2272 		goto error;
2273 	}
2274 
2275 	if (port->pdt != DP_PEER_DEVICE_NONE &&
2276 	    drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
2277 	    drm_dp_mst_port_is_logical(port))
2278 		port->cached_edid = drm_edid_read_ddc(port->connector,
2279 						      &port->aux.ddc);
2280 
2281 	drm_connector_register(port->connector);
2282 	return;
2283 
2284 error:
2285 	drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
2286 }
2287 
2288 /*
2289  * Drop a topology reference, and unlink the port from the in-memory topology
2290  * layout
2291  */
2292 static void
2293 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2294 				struct drm_dp_mst_port *port)
2295 {
2296 	mutex_lock(&mgr->lock);
2297 	port->parent->num_ports--;
2298 	list_del(&port->next);
2299 	mutex_unlock(&mgr->lock);
2300 	drm_dp_mst_topology_put_port(port);
2301 }
2302 
2303 static struct drm_dp_mst_port *
2304 drm_dp_mst_add_port(struct drm_device *dev,
2305 		    struct drm_dp_mst_topology_mgr *mgr,
2306 		    struct drm_dp_mst_branch *mstb, u8 port_number)
2307 {
2308 	struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2309 
2310 	if (!port)
2311 		return NULL;
2312 
2313 	kref_init(&port->topology_kref);
2314 	kref_init(&port->malloc_kref);
2315 	port->parent = mstb;
2316 	port->port_num = port_number;
2317 	port->mgr = mgr;
2318 	port->aux.name = "DPMST";
2319 	port->aux.dev = dev->dev;
2320 	port->aux.is_remote = true;
2321 
2322 	/* initialize the MST downstream port's AUX crc work queue */
2323 	port->aux.drm_dev = dev;
2324 	drm_dp_remote_aux_init(&port->aux);
2325 
2326 	/*
2327 	 * Make sure the memory allocation for our parent branch stays
2328 	 * around until our own memory allocation is released
2329 	 */
2330 	drm_dp_mst_get_mstb_malloc(mstb);
2331 
2332 	return port;
2333 }
2334 
2335 static int
2336 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2337 				    struct drm_device *dev,
2338 				    struct drm_dp_link_addr_reply_port *port_msg)
2339 {
2340 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2341 	struct drm_dp_mst_port *port;
2342 	int old_ddps = 0, ret;
2343 	u8 new_pdt = DP_PEER_DEVICE_NONE;
	bool new_mcs = false;
2345 	bool created = false, send_link_addr = false, changed = false;
2346 
2347 	port = drm_dp_get_port(mstb, port_msg->port_number);
2348 	if (!port) {
2349 		port = drm_dp_mst_add_port(dev, mgr, mstb,
2350 					   port_msg->port_number);
2351 		if (!port)
2352 			return -ENOMEM;
2353 		created = true;
2354 		changed = true;
2355 	} else if (!port->input && port_msg->input_port && port->connector) {
2356 		/* Since port->connector can't be changed here, we create a
2357 		 * new port if input_port changes from 0 to 1
2358 		 */
2359 		drm_dp_mst_topology_unlink_port(mgr, port);
2360 		drm_dp_mst_topology_put_port(port);
2361 		port = drm_dp_mst_add_port(dev, mgr, mstb,
2362 					   port_msg->port_number);
2363 		if (!port)
2364 			return -ENOMEM;
2365 		changed = true;
2366 		created = true;
2367 	} else if (port->input && !port_msg->input_port) {
2368 		changed = true;
2369 	} else if (port->connector) {
2370 		/* We're updating a port that's exposed to userspace, so do it
2371 		 * under lock
2372 		 */
2373 		drm_modeset_lock(&mgr->base.lock, NULL);
2374 
2375 		old_ddps = port->ddps;
2376 		changed = port->ddps != port_msg->ddps ||
2377 			(port->ddps &&
2378 			 (port->ldps != port_msg->legacy_device_plug_status ||
2379 			  port->dpcd_rev != port_msg->dpcd_revision ||
2380 			  port->mcs != port_msg->mcs ||
2381 			  port->pdt != port_msg->peer_device_type ||
2382 			  port->num_sdp_stream_sinks !=
2383 			  port_msg->num_sdp_stream_sinks));
2384 	}
2385 
2386 	port->input = port_msg->input_port;
2387 	if (!port->input)
2388 		new_pdt = port_msg->peer_device_type;
2389 	new_mcs = port_msg->mcs;
2390 	port->ddps = port_msg->ddps;
2391 	port->ldps = port_msg->legacy_device_plug_status;
2392 	port->dpcd_rev = port_msg->dpcd_revision;
2393 	port->num_sdp_streams = port_msg->num_sdp_streams;
2394 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2395 
	/*
	 * Manage mstb port lists with the mgr lock - take a reference
	 * for this list
	 */
2398 	if (created) {
2399 		mutex_lock(&mgr->lock);
2400 		drm_dp_mst_topology_get_port(port);
2401 		list_add(&port->next, &mstb->ports);
2402 		mstb->num_ports++;
2403 		mutex_unlock(&mgr->lock);
2404 	}
2405 
2406 	/*
	 * Reprobe PBN caps both on hotplug and when re-probing the link
2408 	 * for our parent mstb
2409 	 */
2410 	if (old_ddps != port->ddps || !created) {
2411 		if (port->ddps && !port->input) {
2412 			ret = drm_dp_send_enum_path_resources(mgr, mstb,
2413 							      port);
2414 			if (ret == 1)
2415 				changed = true;
2416 		} else {
2417 			port->full_pbn = 0;
2418 		}
2419 	}
2420 
2421 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2422 	if (ret == 1) {
2423 		send_link_addr = true;
2424 	} else if (ret < 0) {
2425 		drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);
2426 		goto fail;
2427 	}
2428 
2429 	/*
2430 	 * If this port wasn't just created, then we're reprobing because
2431 	 * we're coming out of suspend. In this case, always resend the link
2432 	 * address if there's an MSTB on this port
2433 	 */
2434 	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2435 	    port->mcs)
2436 		send_link_addr = true;
2437 
2438 	if (port->connector)
2439 		drm_modeset_unlock(&mgr->base.lock);
2440 	else if (!port->input)
2441 		drm_dp_mst_port_add_connector(mstb, port);
2442 
2443 	if (send_link_addr && port->mstb) {
2444 		ret = drm_dp_send_link_address(mgr, port->mstb);
2445 		if (ret == 1) /* MSTB below us changed */
2446 			changed = true;
2447 		else if (ret < 0)
2448 			goto fail_put;
2449 	}
2450 
2451 	/* put reference to this port */
2452 	drm_dp_mst_topology_put_port(port);
2453 	return changed;
2454 
2455 fail:
2456 	drm_dp_mst_topology_unlink_port(mgr, port);
2457 	if (port->connector)
2458 		drm_modeset_unlock(&mgr->base.lock);
2459 fail_put:
2460 	drm_dp_mst_topology_put_port(port);
2461 	return ret;
2462 }
2463 
2464 static int
2465 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2466 			    struct drm_dp_connection_status_notify *conn_stat)
2467 {
2468 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2469 	struct drm_dp_mst_port *port;
2470 	int old_ddps, ret;
2471 	u8 new_pdt;
2472 	bool new_mcs;
2473 	bool dowork = false, create_connector = false;
2474 
2475 	port = drm_dp_get_port(mstb, conn_stat->port_number);
2476 	if (!port)
2477 		return 0;
2478 
2479 	if (port->connector) {
2480 		if (!port->input && conn_stat->input_port) {
2481 			/*
2482 			 * We can't remove a connector from an already exposed
2483 			 * port, so just throw the port out and make sure we
			 * reprobe the link address of its parent MSTB
2485 			 */
2486 			drm_dp_mst_topology_unlink_port(mgr, port);
2487 			mstb->link_address_sent = false;
2488 			dowork = true;
2489 			goto out;
2490 		}
2491 
2492 		/* Locking is only needed if the port's exposed to userspace */
2493 		drm_modeset_lock(&mgr->base.lock, NULL);
2494 	} else if (port->input && !conn_stat->input_port) {
2495 		create_connector = true;
2496 		/* Reprobe link address so we get num_sdp_streams */
2497 		mstb->link_address_sent = false;
2498 		dowork = true;
2499 	}
2500 
2501 	old_ddps = port->ddps;
2502 	port->input = conn_stat->input_port;
2503 	port->ldps = conn_stat->legacy_device_plug_status;
2504 	port->ddps = conn_stat->displayport_device_plug_status;
2505 
2506 	if (old_ddps != port->ddps) {
2507 		if (port->ddps && !port->input)
2508 			drm_dp_send_enum_path_resources(mgr, mstb, port);
2509 		else
2510 			port->full_pbn = 0;
2511 	}
2512 
2513 	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2514 	new_mcs = conn_stat->message_capability_status;
2515 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2516 	if (ret == 1) {
2517 		dowork = true;
2518 	} else if (ret < 0) {
2519 		drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
2520 		dowork = false;
2521 	}
2522 
2523 	if (port->connector)
2524 		drm_modeset_unlock(&mgr->base.lock);
2525 	else if (create_connector)
2526 		drm_dp_mst_port_add_connector(mstb, port);
2527 
2528 out:
2529 	drm_dp_mst_topology_put_port(port);
2530 	return dowork;
2531 }
2532 
2533 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2534 							       u8 lct, u8 *rad)
2535 {
2536 	struct drm_dp_mst_branch *mstb;
2537 	struct drm_dp_mst_port *port;
2538 	int i, ret;
	/* find the branch device by iterating down */
2540 
2541 	mutex_lock(&mgr->lock);
2542 	mstb = mgr->mst_primary;
2543 
2544 	if (!mstb)
2545 		goto out;
2546 
2547 	for (i = 0; i < lct - 1; i++) {
2548 		int shift = (i % 2) ? 0 : 4;
2549 		int port_num = (rad[i / 2] >> shift) & 0xf;
2550 
2551 		list_for_each_entry(port, &mstb->ports, next) {
2552 			if (port->port_num == port_num) {
2553 				mstb = port->mstb;
2554 				if (!mstb) {
2555 					drm_err(mgr->dev,
2556 						"failed to lookup MSTB with lct %d, rad %02x\n",
2557 						lct, rad[0]);
2558 					goto out;
2559 				}
2560 
2561 				break;
2562 			}
2563 		}
2564 	}
2565 	ret = drm_dp_mst_topology_try_get_mstb(mstb);
2566 	if (!ret)
2567 		mstb = NULL;
2568 out:
2569 	mutex_unlock(&mgr->lock);
2570 	return mstb;
2571 }
2572 
2573 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2574 	struct drm_dp_mst_branch *mstb,
2575 	const uint8_t *guid)
2576 {
2577 	struct drm_dp_mst_branch *found_mstb;
2578 	struct drm_dp_mst_port *port;
2579 
2580 	if (!mstb)
2581 		return NULL;
2582 
2583 	if (memcmp(mstb->guid, guid, 16) == 0)
2584 		return mstb;
2585 
2587 	list_for_each_entry(port, &mstb->ports, next) {
2588 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2589 
2590 		if (found_mstb)
2591 			return found_mstb;
2592 	}
2593 
2594 	return NULL;
2595 }
2596 
2597 static struct drm_dp_mst_branch *
2598 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2599 				     const uint8_t *guid)
2600 {
2601 	struct drm_dp_mst_branch *mstb;
2602 	int ret;
2603 
	/* find the branch device by iterating down */
2605 	mutex_lock(&mgr->lock);
2606 
2607 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2608 	if (mstb) {
2609 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
2610 		if (!ret)
2611 			mstb = NULL;
2612 	}
2613 
2614 	mutex_unlock(&mgr->lock);
2615 	return mstb;
2616 }
2617 
2618 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2619 					       struct drm_dp_mst_branch *mstb)
2620 {
2621 	struct drm_dp_mst_port *port;
2622 	int ret;
2623 	bool changed = false;
2624 
2625 	if (!mstb->link_address_sent) {
2626 		ret = drm_dp_send_link_address(mgr, mstb);
2627 		if (ret == 1)
2628 			changed = true;
2629 		else if (ret < 0)
2630 			return ret;
2631 	}
2632 
2633 	list_for_each_entry(port, &mstb->ports, next) {
2634 		if (port->input || !port->ddps || !port->mstb)
2635 			continue;
2636 
2637 		ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
2638 		if (ret == 1)
2639 			changed = true;
2640 		else if (ret < 0)
2641 			return ret;
2642 	}
2643 
2644 	return changed;
2645 }
2646 
2647 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2648 {
2649 	struct drm_dp_mst_topology_mgr *mgr =
2650 		container_of(work, struct drm_dp_mst_topology_mgr, work);
2651 	struct drm_device *dev = mgr->dev;
2652 	struct drm_dp_mst_branch *mstb;
2653 	int ret;
2654 	bool clear_payload_id_table;
2655 
2656 	mutex_lock(&mgr->probe_lock);
2657 
2658 	mutex_lock(&mgr->lock);
2659 	clear_payload_id_table = !mgr->payload_id_table_cleared;
2660 	mgr->payload_id_table_cleared = true;
2661 
2662 	mstb = mgr->mst_primary;
2663 	if (mstb) {
2664 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
2665 		if (!ret)
2666 			mstb = NULL;
2667 	}
2668 	mutex_unlock(&mgr->lock);
2669 	if (!mstb) {
2670 		mutex_unlock(&mgr->probe_lock);
2671 		return;
2672 	}
2673 
2674 	/*
2675 	 * Certain branch devices seem to incorrectly report an available_pbn
2676 	 * of 0 on downstream sinks, even after clearing the
2677 	 * DP_PAYLOAD_ALLOCATE_* registers in
2678 	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2679 	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2680 	 * things work again.
2681 	 */
2682 	if (clear_payload_id_table) {
2683 		drm_dbg_kms(dev, "Clearing payload ID table\n");
2684 		drm_dp_send_clear_payload_id_table(mgr, mstb);
2685 	}
2686 
2687 	ret = drm_dp_check_and_send_link_address(mgr, mstb);
2688 	drm_dp_mst_topology_put_mstb(mstb);
2689 
2690 	mutex_unlock(&mgr->probe_lock);
2691 	if (ret > 0)
2692 		drm_kms_helper_hotplug_event(dev);
2693 }
2694 
2695 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2696 				 u8 *guid)
2697 {
2698 	u64 salt;
2699 
2700 	if (memchr_inv(guid, 0, 16))
2701 		return true;
2702 
2703 	salt = get_jiffies_64();
2704 
2705 	memcpy(&guid[0], &salt, sizeof(u64));
2706 	memcpy(&guid[8], &salt, sizeof(u64));
2707 
2708 	return false;
2709 }
2710 
2711 static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2712 			    u8 port_num, u32 offset, u8 num_bytes)
2713 {
2714 	struct drm_dp_sideband_msg_req_body req;
2715 
2716 	req.req_type = DP_REMOTE_DPCD_READ;
2717 	req.u.dpcd_read.port_number = port_num;
2718 	req.u.dpcd_read.dpcd_address = offset;
2719 	req.u.dpcd_read.num_bytes = num_bytes;
2720 	drm_dp_encode_sideband_req(&req, msg);
2721 }
2722 
2723 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2724 				    bool up, u8 *msg, int len)
2725 {
2726 	int ret;
2727 	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2728 	int tosend, total, offset;
2729 	int retries = 0;
2730 
2731 retry:
2732 	total = len;
2733 	offset = 0;
2734 	do {
2735 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2736 
2737 		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2738 					&msg[offset],
2739 					tosend);
2740 		if (ret != tosend) {
2741 			if (ret == -EIO && retries < 5) {
2742 				retries++;
2743 				goto retry;
2744 			}
2745 			drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
2746 
2747 			return -EIO;
2748 		}
2749 		offset += tosend;
2750 		total -= tosend;
2751 	} while (total > 0);
2752 	return 0;
2753 }
2754 
2755 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2756 				  struct drm_dp_sideband_msg_tx *txmsg)
2757 {
2758 	struct drm_dp_mst_branch *mstb = txmsg->dst;
2759 	u8 req_type;
2760 
2761 	req_type = txmsg->msg[0] & 0x7f;
2762 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2763 		req_type == DP_RESOURCE_STATUS_NOTIFY ||
2764 		req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
2765 		hdr->broadcast = 1;
2766 	else
2767 		hdr->broadcast = 0;
2768 	hdr->path_msg = txmsg->path_msg;
2769 	if (hdr->broadcast) {
2770 		hdr->lct = 1;
2771 		hdr->lcr = 6;
2772 	} else {
2773 		hdr->lct = mstb->lct;
2774 		hdr->lcr = mstb->lct - 1;
2775 	}
2776 
2777 	memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
2778 
2779 	return 0;
}

/*
2782  * process a single block of the next message in the sideband queue
2783  */
2784 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2785 				   struct drm_dp_sideband_msg_tx *txmsg,
2786 				   bool up)
2787 {
2788 	u8 chunk[48];
2789 	struct drm_dp_sideband_msg_hdr hdr;
2790 	int len, space, idx, tosend;
2791 	int ret;
2792 
2793 	if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2794 		return 0;
2795 
2796 	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2797 
2798 	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2799 		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2800 
2801 	/* make hdr from dst mst */
2802 	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2803 	if (ret < 0)
2804 		return ret;
2805 
2806 	/* amount left to send in this message */
2807 	len = txmsg->cur_len - txmsg->cur_offset;
2808 
	/* 48 byte max sideband msg: subtract 1 byte for the data CRC and the header size */
2810 	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2811 
2812 	tosend = min(len, space);
2813 	if (len == txmsg->cur_len)
2814 		hdr.somt = 1;
2815 	if (space >= len)
2816 		hdr.eomt = 1;
2817 
2819 	hdr.msg_len = tosend + 1;
2820 	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2821 	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2822 	/* add crc at end */
2823 	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2824 	idx += tosend + 1;
2825 
2826 	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2827 	if (ret) {
2828 		if (drm_debug_enabled(DRM_UT_DP)) {
2829 			struct drm_printer p = drm_dbg_printer(mgr->dev,
2830 							       DRM_UT_DP,
2831 							       DBG_PREFIX);
2832 
2833 			drm_printf(&p, "sideband msg failed to send\n");
2834 			drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2835 		}
2836 		return ret;
2837 	}
2838 
2839 	txmsg->cur_offset += tosend;
2840 	if (txmsg->cur_offset == txmsg->cur_len) {
2841 		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2842 		return 1;
2843 	}
2844 	return 0;
2845 }
2846 
2847 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2848 {
2849 	struct drm_dp_sideband_msg_tx *txmsg;
2850 	int ret;
2851 
2852 	WARN_ON(!mutex_is_locked(&mgr->qlock));
2853 
2854 	/* construct a chunk from the first msg in the tx_msg queue */
2855 	if (list_empty(&mgr->tx_msg_downq))
2856 		return;
2857 
2858 	txmsg = list_first_entry(&mgr->tx_msg_downq,
2859 				 struct drm_dp_sideband_msg_tx, next);
2860 	ret = process_single_tx_qlock(mgr, txmsg, false);
2861 	if (ret < 0) {
2862 		drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
2863 		list_del(&txmsg->next);
2864 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2865 		wake_up_all(&mgr->tx_waitq);
2866 	}
2867 }
2868 
2869 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2870 				 struct drm_dp_sideband_msg_tx *txmsg)
2871 {
2872 	mutex_lock(&mgr->qlock);
2873 	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2874 
2875 	if (drm_debug_enabled(DRM_UT_DP)) {
2876 		struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
2877 						       DBG_PREFIX);
2878 
2879 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2880 	}
2881 
2882 	if (list_is_singular(&mgr->tx_msg_downq))
2883 		process_single_down_tx_qlock(mgr);
2884 	mutex_unlock(&mgr->qlock);
2885 }
2886 
2887 static void
2888 drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
2889 			 struct drm_dp_link_address_ack_reply *reply)
2890 {
2891 	struct drm_dp_link_addr_reply_port *port_reply;
2892 	int i;
2893 
2894 	for (i = 0; i < reply->nports; i++) {
2895 		port_reply = &reply->ports[i];
2896 		drm_dbg_kms(mgr->dev,
2897 			    "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2898 			    i,
2899 			    port_reply->input_port,
2900 			    port_reply->peer_device_type,
2901 			    port_reply->port_number,
2902 			    port_reply->dpcd_revision,
2903 			    port_reply->mcs,
2904 			    port_reply->ddps,
2905 			    port_reply->legacy_device_plug_status,
2906 			    port_reply->num_sdp_streams,
2907 			    port_reply->num_sdp_stream_sinks);
2908 	}
2909 }
2910 
2911 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2912 				     struct drm_dp_mst_branch *mstb)
2913 {
2914 	struct drm_dp_sideband_msg_tx *txmsg;
2915 	struct drm_dp_link_address_ack_reply *reply;
2916 	struct drm_dp_mst_port *port, *tmp;
2917 	int i, ret, port_mask = 0;
2918 	bool changed = false;
2919 
2920 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2921 	if (!txmsg)
2922 		return -ENOMEM;
2923 
2924 	txmsg->dst = mstb;
2925 	build_link_address(txmsg);
2926 
2927 	mstb->link_address_sent = true;
2928 	drm_dp_queue_down_tx(mgr, txmsg);
2929 
2930 	/* FIXME: Actually do some real error handling here */
2931 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2932 	if (ret <= 0) {
2933 		drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
2934 		goto out;
2935 	}
2936 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2937 		drm_err(mgr->dev, "link address NAK received\n");
2938 		ret = -EIO;
2939 		goto out;
2940 	}
2941 
2942 	reply = &txmsg->reply.u.link_addr;
2943 	drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
2944 	drm_dp_dump_link_address(mgr, reply);
2945 
2946 	ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2947 	if (ret) {
2948 		char buf[64];
2949 
2950 		drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2951 		drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
2952 		goto out;
2953 	}
2954 
2955 	for (i = 0; i < reply->nports; i++) {
2956 		port_mask |= BIT(reply->ports[i].port_number);
2957 		ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2958 							  &reply->ports[i]);
2959 		if (ret == 1)
2960 			changed = true;
2961 		else if (ret < 0)
2962 			goto out;
2963 	}
2964 
2965 	/* Prune any ports that are currently a part of mstb in our in-memory
2966 	 * topology, but were not seen in this link address. Usually this
2967 	 * means that they were removed while the topology was out of sync,
2968 	 * e.g. during suspend/resume
2969 	 */
2970 	mutex_lock(&mgr->lock);
2971 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2972 		if (port_mask & BIT(port->port_num))
2973 			continue;
2974 
2975 		drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
2976 			    port->port_num);
2977 		list_del(&port->next);
2978 		drm_dp_mst_topology_put_port(port);
2979 		changed = true;
2980 	}
2981 	mutex_unlock(&mgr->lock);
2982 
2983 out:
2984 	if (ret <= 0)
2985 		mstb->link_address_sent = false;
2986 	kfree(txmsg);
2987 	return ret < 0 ? ret : changed;
2988 }
2989 
2990 static void
2991 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
2992 				   struct drm_dp_mst_branch *mstb)
2993 {
2994 	struct drm_dp_sideband_msg_tx *txmsg;
2995 	int ret;
2996 
2997 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2998 	if (!txmsg)
2999 		return;
3000 
3001 	txmsg->dst = mstb;
3002 	build_clear_payload_id_table(txmsg);
3003 
3004 	drm_dp_queue_down_tx(mgr, txmsg);
3005 
3006 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3007 	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3008 		drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
3009 
3010 	kfree(txmsg);
3011 }
3012 
3013 static int
3014 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3015 				struct drm_dp_mst_branch *mstb,
3016 				struct drm_dp_mst_port *port)
3017 {
3018 	struct drm_dp_enum_path_resources_ack_reply *path_res;
3019 	struct drm_dp_sideband_msg_tx *txmsg;
3020 	int ret;
3021 
3022 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3023 	if (!txmsg)
3024 		return -ENOMEM;
3025 
3026 	txmsg->dst = mstb;
3027 	build_enum_path_resources(txmsg, port->port_num);
3028 
3029 	drm_dp_queue_down_tx(mgr, txmsg);
3030 
3031 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3032 	if (ret > 0) {
3033 		ret = 0;
3034 		path_res = &txmsg->reply.u.path_resources;
3035 
3036 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3037 			drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
3038 		} else {
3039 			if (port->port_num != path_res->port_number)
				drm_err(mgr->dev, "got incorrect port in response\n");
3041 
3042 			drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
3043 				    path_res->port_number,
3044 				    path_res->full_payload_bw_number,
3045 				    path_res->avail_payload_bw_number);
3046 
3047 			/*
3048 			 * If something changed, make sure we send a
3049 			 * hotplug
3050 			 */
3051 			if (port->full_pbn != path_res->full_payload_bw_number ||
3052 			    port->fec_capable != path_res->fec_capable)
3053 				ret = 1;
3054 
3055 			port->full_pbn = path_res->full_payload_bw_number;
3056 			port->fec_capable = path_res->fec_capable;
3057 		}
3058 	}
3059 
3060 	kfree(txmsg);
3061 	return ret;
3062 }
3063 
3064 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3065 {
3066 	if (!mstb->port_parent)
3067 		return NULL;
3068 
3069 	if (mstb->port_parent->mstb != mstb)
3070 		return mstb->port_parent;
3071 
3072 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3073 }
3074 
3075 /*
3076  * Searches upwards in the topology starting from mstb to try to find the
3077  * closest available parent of mstb that's still connected to the rest of the
3078  * topology. This can be used in order to perform operations like releasing
3079  * payloads, where the branch device which owned the payload may no longer be
3080  * around and thus would require that the payload on the last living relative
3081  * be freed instead.
3082  */
3083 static struct drm_dp_mst_branch *
3084 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3085 					struct drm_dp_mst_branch *mstb,
3086 					int *port_num)
3087 {
3088 	struct drm_dp_mst_branch *rmstb = NULL;
3089 	struct drm_dp_mst_port *found_port;
3090 
3091 	mutex_lock(&mgr->lock);
3092 	if (!mgr->mst_primary)
3093 		goto out;
3094 
3095 	do {
3096 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3097 		if (!found_port)
3098 			break;
3099 
3100 		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3101 			rmstb = found_port->parent;
3102 			*port_num = found_port->port_num;
3103 		} else {
3104 			/* Search again, starting from this parent */
3105 			mstb = found_port->parent;
3106 		}
3107 	} while (!rmstb);
3108 out:
3109 	mutex_unlock(&mgr->lock);
3110 	return rmstb;
3111 }
3112 
3113 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3114 				   struct drm_dp_mst_port *port,
3115 				   int id,
3116 				   int pbn)
3117 {
3118 	struct drm_dp_sideband_msg_tx *txmsg;
3119 	struct drm_dp_mst_branch *mstb;
3120 	int ret, port_num;
3121 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3122 	int i;
3123 
3124 	port_num = port->port_num;
3125 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3126 	if (!mstb) {
3127 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3128 							       port->parent,
3129 							       &port_num);
3130 
3131 		if (!mstb)
3132 			return -EINVAL;
3133 	}
3134 
3135 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3136 	if (!txmsg) {
3137 		ret = -ENOMEM;
3138 		goto fail_put;
3139 	}
3140 
3141 	for (i = 0; i < port->num_sdp_streams; i++)
3142 		sinks[i] = i;
3143 
3144 	txmsg->dst = mstb;
3145 	build_allocate_payload(txmsg, port_num,
3146 			       id,
3147 			       pbn, port->num_sdp_streams, sinks);
3148 
3149 	drm_dp_queue_down_tx(mgr, txmsg);
3150 
3151 	/*
3152 	 * FIXME: there is a small chance that between getting the last
3153 	 * connected mstb and sending the payload message, the last connected
3154 	 * mstb could also be removed from the topology. In the future, this
3155 	 * needs to be fixed by restarting the
3156 	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3157 	 * timeout if the topology is still connected to the system.
3158 	 */
3159 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3160 	if (ret > 0) {
3161 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3162 			ret = -EINVAL;
3163 		else
3164 			ret = 0;
3165 	}
3166 	kfree(txmsg);
3167 fail_put:
3168 	drm_dp_mst_topology_put_mstb(mstb);
3169 	return ret;
3170 }
3171 
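/**
 * drm_dp_send_power_updown_phy() - send a power up/down phy request
 * @mgr: MST topology manager to use
 * @port: the port to send the request for
 * @power_up: true to power the phy up, false to power it down
 *
 * Returns: 0 on success, negative error code on failure.
 */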
3172 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3173 				 struct drm_dp_mst_port *port, bool power_up)
3174 {
3175 	struct drm_dp_sideband_msg_tx *txmsg;
3176 	int ret;
3177 
3178 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
3179 	if (!port)
3180 		return -EINVAL;
3181 
3182 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3183 	if (!txmsg) {
3184 		drm_dp_mst_topology_put_port(port);
3185 		return -ENOMEM;
3186 	}
3187 
3188 	txmsg->dst = port->parent;
3189 	build_power_updown_phy(txmsg, port->port_num, power_up);
3190 	drm_dp_queue_down_tx(mgr, txmsg);
3191 
3192 	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3193 	if (ret > 0) {
3194 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3195 			ret = -EINVAL;
3196 		else
3197 			ret = 0;
3198 	}
3199 	kfree(txmsg);
3200 	drm_dp_mst_topology_put_port(port);
3201 
3202 	return ret;
3203 }
3204 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3205 
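/**
 * drm_dp_send_query_stream_enc_status() - query stream encryption status
 * @mgr: MST topology manager to use
 * @port: the port whose stream to query
 * @status: buffer the QUERY_STREAM_ENCRYPTION_STATUS ACK reply is copied into
 *
 * Sends a QUERY_STREAM_ENCRYPTION_STATUS message for the payload currently
 * allocated to @port to the branch device directly connected to the source.
 *
 * Returns: 0 on success, negative error code on failure.
 */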
3206 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
3207 		struct drm_dp_mst_port *port,
3208 		struct drm_dp_query_stream_enc_status_ack_reply *status)
3209 {
3210 	struct drm_dp_mst_topology_state *state;
3211 	struct drm_dp_mst_atomic_payload *payload;
3212 	struct drm_dp_sideband_msg_tx *txmsg;
3213 	u8 nonce[7];
3214 	int ret;
3215 
3216 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3217 	if (!txmsg)
3218 		return -ENOMEM;
3219 
3220 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
3221 	if (!port) {
3222 		ret = -EINVAL;
3223 		goto out_get_port;
3224 	}
3225 
3226 	get_random_bytes(nonce, sizeof(nonce));
3227 
3228 	drm_modeset_lock(&mgr->base.lock, NULL);
3229 	state = to_drm_dp_mst_topology_state(mgr->base.state);
3230 	payload = drm_atomic_get_mst_payload_state(state, port);
3231 
3232 	/*
3233 	 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
3234 	 *  transaction at the MST Branch device directly connected to the
3235 	 *  Source"
3236 	 */
3237 	txmsg->dst = mgr->mst_primary;
3238 
3239 	build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
3240 
3241 	drm_dp_queue_down_tx(mgr, txmsg);
3242 
3243 	ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3244 	if (ret < 0) {
3245 		goto out;
3246 	} else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3247 		drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3248 		ret = -ENXIO;
3249 		goto out;
3250 	}
3251 
3252 	ret = 0;
3253 	memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3254 
3255 out:
3256 	drm_modeset_unlock(&mgr->base.lock);
3257 	drm_dp_mst_topology_put_port(port);
3258 out_get_port:
3259 	kfree(txmsg);
3260 	return ret;
3261 }
3262 EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
3263 
3264 static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
3265 					struct drm_dp_mst_atomic_payload *payload)
3266 {
3267 	return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
3268 					 payload->time_slots);
3269 }
3270 
3271 static int drm_dp_create_payload_to_remote(struct drm_dp_mst_topology_mgr *mgr,
3272 					   struct drm_dp_mst_atomic_payload *payload)
3273 {
3274 	int ret;
3275 	struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
3276 
3277 	if (!port)
3278 		return -EIO;
3279 
3280 	ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
3281 	drm_dp_mst_topology_put_port(port);
3282 	return ret;
3283 }
3284 
3285 static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_mgr *mgr,
3286 						     struct drm_dp_mst_topology_state *mst_state,
3287 						     struct drm_dp_mst_atomic_payload *payload)
3288 {
3289 	drm_dbg_kms(mgr->dev, "\n");
3290 
3291 	/* it's okay for these to fail */
3292 	if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE) {
3293 		drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
3294 		payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
3295 	}
3296 
3297 	if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP)
3298 		drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
3299 }
3300 
3301 /**
3302  * drm_dp_add_payload_part1() - Execute payload update part 1
3303  * @mgr: Manager to use.
3304  * @mst_state: The MST atomic state
3305  * @payload: The payload to write
3306  *
3307  * Determines the starting time slot for the given payload, and programs the VCPI for this payload
3308  * into the DPCD of DPRX. After calling this, the driver should generate ACT and payload packets.
3309  *
3310  * Returns: 0 on success, error code on failure.
3311  */
3312 int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
3313 			     struct drm_dp_mst_topology_state *mst_state,
3314 			     struct drm_dp_mst_atomic_payload *payload)
3315 {
3316 	struct drm_dp_mst_port *port;
3317 	int ret;
3318 
3319 	/* Update mst mgr info */
3320 	if (mgr->payload_count == 0)
3321 		mgr->next_start_slot = mst_state->start_slot;
3322 
3323 	payload->vc_start_slot = mgr->next_start_slot;
3324 
3325 	mgr->payload_count++;
3326 	mgr->next_start_slot += payload->time_slots;
3327 
3328 	payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL;
3329 
3330 	/* Allocate payload to immediate downstream facing port */
3331 	port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
3332 	if (!port) {
3333 		drm_dbg_kms(mgr->dev,
3334 			    "VCPI %d for port %p not in topology, not creating a payload to remote\n",
3335 			    payload->vcpi, payload->port);
3336 		return -EIO;
3337 	}
3338 
3339 	ret = drm_dp_create_payload_at_dfp(mgr, payload);
3340 	if (ret < 0) {
3341 		drm_dbg_kms(mgr->dev, "Failed to create MST payload for port %p: %d\n",
3342 			    payload->port, ret);
3343 		goto put_port;
3344 	}
3345 
3346 	payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
3347 
3348 put_port:
3349 	drm_dp_mst_topology_put_port(port);
3350 
3351 	return ret;
3352 }
3353 EXPORT_SYMBOL(drm_dp_add_payload_part1);
3354 
3355 /**
3356  * drm_dp_remove_payload_part1() - Remove an MST payload along the virtual channel
3357  * @mgr: Manager to use.
3358  * @mst_state: The MST atomic state
3359  * @payload: The payload to remove
3360  *
3361  * Removes a payload along the virtual channel if it was successfully allocated.
3362  * After calling this, the driver should set HW to generate ACT and then switch to new
3363  * payload allocation state.
3364  */
3365 void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
3366 				 struct drm_dp_mst_topology_state *mst_state,
3367 				 struct drm_dp_mst_atomic_payload *payload)
3368 {
3369 	/* Remove remote payload allocation */
3370 	bool send_remove = false;
3371 
3372 	mutex_lock(&mgr->lock);
3373 	send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
3374 	mutex_unlock(&mgr->lock);
3375 
3376 	if (send_remove)
3377 		drm_dp_destroy_payload_at_remote_and_dfp(mgr, mst_state, payload);
3378 	else
3379 		drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
3380 			    payload->vcpi);
3381 
3382 	payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL;
3383 }
3384 EXPORT_SYMBOL(drm_dp_remove_payload_part1);
3385 
3386 /**
3387  * drm_dp_remove_payload_part2() - Remove an MST payload locally
3388  * @mgr: Manager to use.
3389  * @mst_state: The MST atomic state
3390  * @old_payload: The payload with its old state
3391  * @new_payload: The payload with its latest state
3392  *
3393  * Updates the starting time slots of all other payloads which would have been shifted towards
3394  * the start of the payload ID table as a result of removing a payload. Driver should call this
3395  * function whenever it removes a payload in its HW. It's independent to the result of payload
3396  * allocation/deallocation at branch devices along the virtual channel.
3397  */
3398 void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
3399 				 struct drm_dp_mst_topology_state *mst_state,
3400 				 const struct drm_dp_mst_atomic_payload *old_payload,
3401 				 struct drm_dp_mst_atomic_payload *new_payload)
3402 {
3403 	struct drm_dp_mst_atomic_payload *pos;
3404 
3405 	/* Remove local payload allocation */
3406 	list_for_each_entry(pos, &mst_state->payloads, next) {
3407 		if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
3408 			pos->vc_start_slot -= old_payload->time_slots;
3409 	}
3410 	new_payload->vc_start_slot = -1;
3411 
3412 	mgr->payload_count--;
3413 	mgr->next_start_slot -= old_payload->time_slots;
3414 
3415 	if (new_payload->delete)
3416 		drm_dp_mst_put_port_malloc(new_payload->port);
3417 
3418 	new_payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE;
3419 }
3420 EXPORT_SYMBOL(drm_dp_remove_payload_part2);
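
/*
 * A minimal sketch of the two-part removal sequence from a driver's payload
 * disable path. example_remove_payload() and the surrounding flow are
 * hypothetical; only the helper calls themselves are real API.
 */
static void example_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_topology_state *old_mst_state,
				   struct drm_dp_mst_topology_state *new_mst_state,
				   struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, port);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, port);

	/* Part 1: deallocate along the virtual channel (sideband messages) */
	drm_dp_remove_payload_part1(mgr, new_mst_state, new_payload);

	/* Driver-specific: remove the payload from source HW, generate ACT */

	/* Part 2: fix up the locally tracked start slots and bookkeeping */
	drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
}
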
3421 /**
3422  * drm_dp_add_payload_part2() - Execute payload update part 2
3423  * @mgr: Manager to use.
3424  * @payload: The payload to update
3425  *
3426  * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
3427  * function will send the sideband messages to finish allocating this payload.
3428  *
3429  * Returns: 0 on success, negative error code on failure.
3430  */
3431 int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
3432 			     struct drm_dp_mst_atomic_payload *payload)
3433 {
3434 	int ret = 0;
3435 
3436 	/* Skip failed payloads */
3437 	if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) {
3438 		drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
3439 			    payload->port->connector->name);
3440 		return -EIO;
3441 	}
3442 
3443 	/* Allocate payload to remote end */
3444 	ret = drm_dp_create_payload_to_remote(mgr, payload);
3445 	if (ret < 0)
3446 		drm_err(mgr->dev, "Part 2 of creating MST payload for %p failed: %d\n",
3447 			payload->port, ret);
3448 	else
3449 		payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE;
3450 
3451 	return ret;
3452 }
3453 EXPORT_SYMBOL(drm_dp_add_payload_part2);
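
/*
 * A minimal sketch of the full two-part allocation sequence as a driver's
 * enable path might issue it; example_add_payload() and the HW step are
 * hypothetical, and the error handling is driver policy (some drivers carry
 * on and let drm_dp_add_payload_part2() skip payloads whose part 1 failed).
 */
static int example_add_payload(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_topology_state *mst_state,
			       struct drm_dp_mst_atomic_payload *payload)
{
	int ret;

	/* Part 1: allocate time slots locally and at the first DFP */
	ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
	if (ret < 0)
		return ret;

	/* Driver-specific: program the payload in source HW, generate ACT */

	/* Wait for the hub to signal ACT handled before finishing */
	ret = drm_dp_check_act_status(mgr);
	if (ret < 0)
		return ret;

	/* Part 2: ALLOCATE_PAYLOAD sideband message to the remote end */
	return drm_dp_add_payload_part2(mgr, payload);
}
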
3454 
3455 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3456 				 struct drm_dp_mst_port *port,
3457 				 int offset, int size, u8 *bytes)
3458 {
3459 	int ret = 0;
3460 	struct drm_dp_sideband_msg_tx *txmsg;
3461 	struct drm_dp_mst_branch *mstb;
3462 
3463 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3464 	if (!mstb)
3465 		return -EINVAL;
3466 
3467 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3468 	if (!txmsg) {
3469 		ret = -ENOMEM;
3470 		goto fail_put;
3471 	}
3472 
3473 	build_dpcd_read(txmsg, port->port_num, offset, size);
3474 	txmsg->dst = mstb;
3475 
3476 	drm_dp_queue_down_tx(mgr, txmsg);
3477 
3478 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3479 	if (ret < 0)
3480 		goto fail_free;
3481 
3482 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3483 		drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3484 			    mstb, port->port_num, offset, size);
3485 		ret = -EIO;
3486 		goto fail_free;
3487 	}
3488 
3489 	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3490 		ret = -EPROTO;
3491 		goto fail_free;
3492 	}
3493 
3494 	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3495 		    size);
3496 	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3497 
3498 fail_free:
3499 	kfree(txmsg);
3500 fail_put:
3501 	drm_dp_mst_topology_put_mstb(mstb);
3502 
3503 	return ret;
3504 }
3505 
3506 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3507 				  struct drm_dp_mst_port *port,
3508 				  int offset, int size, u8 *bytes)
3509 {
3510 	int ret;
3511 	struct drm_dp_sideband_msg_tx *txmsg;
3512 	struct drm_dp_mst_branch *mstb;
3513 
3514 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3515 	if (!mstb)
3516 		return -EINVAL;
3517 
3518 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3519 	if (!txmsg) {
3520 		ret = -ENOMEM;
3521 		goto fail_put;
3522 	}
3523 
3524 	build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3525 	txmsg->dst = mstb;
3526 
3527 	drm_dp_queue_down_tx(mgr, txmsg);
3528 
3529 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3530 	if (ret > 0) {
3531 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3532 			ret = -EIO;
3533 		else
3534 			ret = size;
3535 	}
3536 
3537 	kfree(txmsg);
3538 fail_put:
3539 	drm_dp_mst_topology_put_mstb(mstb);
3540 	return ret;
3541 }
3542 
3543 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3544 {
3545 	struct drm_dp_sideband_msg_reply_body reply;
3546 
3547 	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3548 	reply.req_type = req_type;
3549 	drm_dp_encode_sideband_reply(&reply, msg);
3550 	return 0;
3551 }
3552 
3553 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3554 				    struct drm_dp_mst_branch *mstb,
3555 				    int req_type, bool broadcast)
3556 {
3557 	struct drm_dp_sideband_msg_tx *txmsg;
3558 
3559 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3560 	if (!txmsg)
3561 		return -ENOMEM;
3562 
3563 	txmsg->dst = mstb;
3564 	drm_dp_encode_up_ack_reply(txmsg, req_type);
3565 
3566 	mutex_lock(&mgr->qlock);
3567 	/* construct a chunk from the first msg in the tx_msg queue */
3568 	process_single_tx_qlock(mgr, txmsg, true);
3569 	mutex_unlock(&mgr->qlock);
3570 
3571 	kfree(txmsg);
3572 	return 0;
3573 }
3574 
3575 /**
3576  * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
3577  * @mgr: The &drm_dp_mst_topology_mgr to use
3578  * @link_rate: link rate in 10kbits/s units
3579  * @link_lane_count: lane count
3580  *
3581  * Calculate the total bandwidth of a MultiStream Transport link. The returned
3582  * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
3583  * convert the number of PBNs required for a given stream to the number of
3584  * timeslots this stream requires in each MTP.
3585  *
3586  * Returns the BW / timeslot value in 20.12 fixed point format.
3587  */
3588 fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
3589 				    int link_rate, int link_lane_count)
3590 {
3591 	int ch_coding_efficiency =
3592 		drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
3593 	fixed20_12 ret;
3594 
3595 	if (link_rate == 0 || link_lane_count == 0)
3596 		drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
3597 			    link_rate, link_lane_count);
3598 
3599 	/* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
3600 	ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
3601 						  ch_coding_efficiency),
3602 				      (1000000ULL * 8 * 5400) >> 12);
3603 
3604 	return ret;
3605 }
3606 EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
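
/*
 * A sketch of how the returned fixed20_12 value is consumed: dividing a
 * stream's PBN requirement by the per-timeslot bandwidth gives the number of
 * time slots the stream occupies in each MTP, mirroring what
 * drm_dp_atomic_find_time_slots() does with &drm_dp_mst_topology_state.pbn_div.
 */
static int example_pbn_to_time_slots(const struct drm_dp_mst_topology_mgr *mgr,
				     int link_rate, int link_lane_count,
				     int pbn)
{
	fixed20_12 pbn_div = drm_dp_get_vc_payload_bw(mgr, link_rate,
						      link_lane_count);

	/* Round up: a partially filled time slot still occupies a full slot */
	return DIV_ROUND_UP(dfixed_const(pbn), pbn_div.full);
}
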
3607 
3608 /**
3609  * drm_dp_read_mst_cap() - Read the sink's MST mode capability
3610  * @aux: The DP AUX channel to use
3611  * @dpcd: A cached copy of the DPCD capabilities for this sink
3612  *
3613  * Returns: enum drm_dp_mst_mode to indicate MST mode capability
3614  */
3615 enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
3616 					 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
3617 {
3618 	u8 mstm_cap;
3619 
3620 	if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
3621 		return DRM_DP_SST;
3622 
3623 	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
3624 		return DRM_DP_SST;
3625 
3626 	if (mstm_cap & DP_MST_CAP)
3627 		return DRM_DP_MST;
3628 
3629 	if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG)
3630 		return DRM_DP_SST_SIDEBAND_MSG;
3631 
3632 	return DRM_DP_SST;
3633 }
3634 EXPORT_SYMBOL(drm_dp_read_mst_cap);
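
/*
 * A sketch of a detect-time decision based on the returned mode, assuming the
 * driver only drives full MST sinks in MST mode (DRM_DP_SST_SIDEBAND_MSG
 * capable sinks are simply kept in SST mode here).
 */
static void example_detect_mst(struct drm_dp_mst_topology_mgr *mgr,
			       const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	enum drm_dp_mst_mode mst_mode = drm_dp_read_mst_cap(mgr->aux, dpcd);

	/* No-op if the requested state matches the current one */
	drm_dp_mst_topology_mgr_set_mst(mgr, mst_mode == DRM_DP_MST);
}
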
3635 
3636 /**
3637  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3638  * @mgr: manager to set state for
3639  * @mst_state: true to enable MST on this connector - false to disable.
3640  *
3641  * This is called by the driver when it detects an MST capable device plugged
3642  * into a DP MST capable port, or when a DP MST capable device is unplugged.
3643  */
3644 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3645 {
3646 	int ret = 0;
3647 	struct drm_dp_mst_branch *mstb = NULL;
3648 
3649 	mutex_lock(&mgr->lock);
3650 	if (mst_state == mgr->mst_state)
3651 		goto out_unlock;
3652 
3653 	mgr->mst_state = mst_state;
3654 	/* set the device into MST mode */
3655 	if (mst_state) {
3656 		WARN_ON(mgr->mst_primary);
3657 
3658 		/* get dpcd info */
3659 		ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
3660 		if (ret < 0) {
3661 			drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
3662 				    mgr->aux->name, ret);
3663 			goto out_unlock;
3664 		}
3665 
3666 		/* add initial branch device at LCT 1 */
3667 		mstb = drm_dp_add_mst_branch_device(1, NULL);
3668 		if (mstb == NULL) {
3669 			ret = -ENOMEM;
3670 			goto out_unlock;
3671 		}
3672 		mstb->mgr = mgr;
3673 
3674 		/* give this the main reference */
3675 		mgr->mst_primary = mstb;
3676 		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3677 
3678 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3679 					 DP_MST_EN |
3680 					 DP_UP_REQ_EN |
3681 					 DP_UPSTREAM_IS_SRC);
3682 		if (ret < 0)
3683 			goto out_unlock;
3684 
3685 		/* Write reset payload */
3686 		drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
3687 
3688 		queue_work(system_long_wq, &mgr->work);
3689 
3690 		ret = 0;
3691 	} else {
3692 		/* disable MST on the device */
3693 		mstb = mgr->mst_primary;
3694 		mgr->mst_primary = NULL;
3695 		/* this can fail if the device is gone */
3696 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3697 		ret = 0;
3698 		mgr->payload_id_table_cleared = false;
3699 
3700 		memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
3701 		memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
3702 	}
3703 
3704 out_unlock:
3705 	mutex_unlock(&mgr->lock);
3706 	if (mstb)
3707 		drm_dp_mst_topology_put_mstb(mstb);
3708 	return ret;
3710 }
3711 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
3712 
3713 static void
3714 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3715 {
3716 	struct drm_dp_mst_port *port;
3717 
3718 	/* The link address will need to be re-sent on resume */
3719 	mstb->link_address_sent = false;
3720 
3721 	list_for_each_entry(port, &mstb->ports, next)
3722 		if (port->mstb)
3723 			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3724 }
3725 
3726 /**
3727  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3728  * @mgr: manager to suspend
3729  *
3730  * This function tells the MST device that we can't handle UP messages
3731  * anymore. This should stop it from sending any since we are suspended.
3732  */
3733 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3734 {
3735 	mutex_lock(&mgr->lock);
3736 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3737 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
3738 	mutex_unlock(&mgr->lock);
3739 	flush_work(&mgr->up_req_work);
3740 	flush_work(&mgr->work);
3741 	flush_work(&mgr->delayed_destroy_work);
3742 
3743 	mutex_lock(&mgr->lock);
3744 	if (mgr->mst_state && mgr->mst_primary)
3745 		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3746 	mutex_unlock(&mgr->lock);
3747 }
3748 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3749 
3750 /**
3751  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3752  * @mgr: manager to resume
3753  * @sync: whether or not to perform topology reprobing synchronously
3754  *
3755  * This will fetch the DPCD and check whether the device is still there;
3756  * if it is, it will rewrite the MSTM control bits and return.
3757  *
3758  * If the device has gone away, this returns -1 and the driver should do
3759  * a full MST reprobe, in case we were undocked.
3760  *
3761  * During system resume (where it is assumed that the driver will be calling
3762  * drm_atomic_helper_resume()) this function should be called beforehand with
3763  * @sync set to true. In contexts like runtime resume where the driver is not
3764  * expected to be calling drm_atomic_helper_resume(), this function should be
3765  * called with @sync set to false in order to avoid deadlocking.
3766  *
3767  * Returns: -1 if the MST topology was removed while we were suspended, 0
3768  * otherwise.
3769  */
3770 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3771 				   bool sync)
3772 {
3773 	int ret;
3774 	u8 guid[16];
3775 
3776 	mutex_lock(&mgr->lock);
3777 	if (!mgr->mst_primary)
3778 		goto out_fail;
3779 
3780 	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
3781 		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3782 		goto out_fail;
3783 	}
3784 
3785 	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3786 				 DP_MST_EN |
3787 				 DP_UP_REQ_EN |
3788 				 DP_UPSTREAM_IS_SRC);
3789 	if (ret < 0) {
3790 		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
3791 		goto out_fail;
3792 	}
3793 
3794 	/* Some hubs forget their guids after they resume */
3795 	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3796 	if (ret != 16) {
3797 		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3798 		goto out_fail;
3799 	}
3800 
3801 	ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3802 	if (ret) {
3803 		drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
3804 		goto out_fail;
3805 	}
3806 
3807 	/*
3808 	 * For the final step of resuming the topology, we need to bring the
3809 	 * state of our in-memory topology back into sync with reality. So,
3810 	 * restart the probing process as if we're probing a new hub
3811 	 */
3812 	queue_work(system_long_wq, &mgr->work);
3813 	mutex_unlock(&mgr->lock);
3814 
3815 	if (sync) {
3816 		drm_dbg_kms(mgr->dev,
3817 			    "Waiting for link probe work to finish re-syncing topology...\n");
3818 		flush_work(&mgr->work);
3819 	}
3820 
3821 	return 0;
3822 
3823 out_fail:
3824 	mutex_unlock(&mgr->lock);
3825 	return -1;
3826 }
3827 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
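
/*
 * A sketch of how the suspend/resume helpers pair up in a driver's system PM
 * hooks; the hotplug fallback on failure is hypothetical driver policy.
 */
static void example_mst_system_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	/* @sync == true: system resume, drm_atomic_helper_resume() follows */
	if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0) {
		/* Topology vanished while suspended: drop to SST and reprobe */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
		drm_kms_helper_hotplug_event(mgr->dev);
	}
}
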
3828 
3829 static bool
3830 drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3831 		      struct drm_dp_mst_branch **mstb)
3832 {
3833 	int len;
3834 	u8 replyblock[32];
3835 	int replylen, curreply;
3836 	int ret;
3837 	u8 hdrlen;
3838 	struct drm_dp_sideband_msg_hdr hdr;
3839 	struct drm_dp_sideband_msg_rx *msg =
3840 		up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3841 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3842 			   DP_SIDEBAND_MSG_DOWN_REP_BASE;
3843 
3844 	if (!up)
3845 		*mstb = NULL;
3846 
3847 	len = min(mgr->max_dpcd_transaction_bytes, 16);
3848 	ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
3849 	if (ret != len) {
3850 		drm_dbg_kms(mgr->dev, "failed to read DPCD sideband msg (len %d, ret %d)\n", len, ret);
3851 		return false;
3852 	}
3853 
3854 	ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
3855 	if (!ret) {
3856 		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3857 			       1, replyblock, len, false);
3858 		drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
3859 		return false;
3860 	}
3861 
3862 	if (!up) {
3863 		/* Caller is responsible for giving back this reference */
3864 		*mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3865 		if (!*mstb) {
3866 			drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
3867 			return false;
3868 		}
3869 	}
3870 
3871 	if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3872 		drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
3873 		return false;
3874 	}
3875 
3876 	replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3877 	ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3878 	if (!ret) {
3879 		drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
3880 		return false;
3881 	}
3882 
3883 	replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3884 	curreply = len;
3885 	while (replylen > 0) {
3886 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3887 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3888 				    replyblock, len);
3889 		if (ret != len) {
3890 			drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
3891 				    len, ret);
3892 			return false;
3893 		}
3894 
3895 		ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3896 		if (!ret) {
3897 			drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
3898 			return false;
3899 		}
3900 
3901 		curreply += len;
3902 		replylen -= len;
3903 	}
3904 	return true;
3905 }
3906 
3907 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3908 {
3909 	struct drm_dp_sideband_msg_tx *txmsg;
3910 	struct drm_dp_mst_branch *mstb = NULL;
3911 	struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3912 
3913 	if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3914 		goto out_clear_reply;
3915 
3916 	/* Multi-packet message transmission, don't clear the reply */
3917 	if (!msg->have_eomt)
3918 		goto out;
3919 
3920 	/* find the message */
3921 	mutex_lock(&mgr->qlock);
3922 	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3923 					 struct drm_dp_sideband_msg_tx, next);
3924 	mutex_unlock(&mgr->qlock);
3925 
3926 	/* Were we actually expecting a response, and from this mstb? */
3927 	if (!txmsg || txmsg->dst != mstb) {
3928 		struct drm_dp_sideband_msg_hdr *hdr;
3929 
3930 		hdr = &msg->initial_hdr;
3931 		drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
3932 			    mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
3933 		goto out_clear_reply;
3934 	}
3935 
3936 	drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
3937 
3938 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3939 		drm_dbg_kms(mgr->dev,
3940 			    "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3941 			    txmsg->reply.req_type,
3942 			    drm_dp_mst_req_type_str(txmsg->reply.req_type),
3943 			    txmsg->reply.u.nak.reason,
3944 			    drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3945 			    txmsg->reply.u.nak.nak_data);
3946 	}
3947 
3948 	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3949 	drm_dp_mst_topology_put_mstb(mstb);
3950 
3951 	mutex_lock(&mgr->qlock);
3952 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3953 	list_del(&txmsg->next);
3954 	mutex_unlock(&mgr->qlock);
3955 
3956 	wake_up_all(&mgr->tx_waitq);
3957 
3958 	return 0;
3959 
3960 out_clear_reply:
3961 	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3962 out:
3963 	if (mstb)
3964 		drm_dp_mst_topology_put_mstb(mstb);
3965 
3966 	return 0;
3967 }
3968 
3969 static inline bool
3970 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3971 			  struct drm_dp_pending_up_req *up_req)
3972 {
3973 	struct drm_dp_mst_branch *mstb = NULL;
3974 	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3975 	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3976 	bool hotplug = false, dowork = false;
3977 
3978 	if (hdr->broadcast) {
3979 		const u8 *guid = NULL;
3980 
3981 		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3982 			guid = msg->u.conn_stat.guid;
3983 		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3984 			guid = msg->u.resource_stat.guid;
3985 
3986 		if (guid)
3987 			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3988 	} else {
3989 		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3990 	}
3991 
3992 	if (!mstb) {
3993 		drm_dbg_kms(mgr->dev, "Got MST up request from unknown device %d\n", hdr->lct);
3994 		return false;
3995 	}
3996 
3997 	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3998 	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3999 		dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
4000 		hotplug = true;
4001 	}
4002 
4003 	drm_dp_mst_topology_put_mstb(mstb);
4004 
4005 	if (dowork)
4006 		queue_work(system_long_wq, &mgr->work);
4007 	return hotplug;
4008 }
4009 
4010 static void drm_dp_mst_up_req_work(struct work_struct *work)
4011 {
4012 	struct drm_dp_mst_topology_mgr *mgr =
4013 		container_of(work, struct drm_dp_mst_topology_mgr,
4014 			     up_req_work);
4015 	struct drm_dp_pending_up_req *up_req;
4016 	bool send_hotplug = false;
4017 
4018 	mutex_lock(&mgr->probe_lock);
4019 	while (true) {
4020 		mutex_lock(&mgr->up_req_lock);
4021 		up_req = list_first_entry_or_null(&mgr->up_req_list,
4022 						  struct drm_dp_pending_up_req,
4023 						  next);
4024 		if (up_req)
4025 			list_del(&up_req->next);
4026 		mutex_unlock(&mgr->up_req_lock);
4027 
4028 		if (!up_req)
4029 			break;
4030 
4031 		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
4032 		kfree(up_req);
4033 	}
4034 	mutex_unlock(&mgr->probe_lock);
4035 
4036 	if (send_hotplug)
4037 		drm_kms_helper_hotplug_event(mgr->dev);
4038 }
4039 
4040 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
4041 {
4042 	struct drm_dp_pending_up_req *up_req;
4043 
4044 	if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
4045 		goto out;
4046 
4047 	if (!mgr->up_req_recv.have_eomt)
4048 		return 0;
4049 
4050 	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
4051 	if (!up_req)
4052 		return -ENOMEM;
4053 
4054 	INIT_LIST_HEAD(&up_req->next);
4055 
4056 	drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
4057 
4058 	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
4059 	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
4060 		drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
4061 			    up_req->msg.req_type);
4062 		kfree(up_req);
4063 		goto out;
4064 	}
4065 
4066 	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
4067 				 false);
4068 
4069 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
4070 		const struct drm_dp_connection_status_notify *conn_stat =
4071 			&up_req->msg.u.conn_stat;
4072 
4073 		drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
4074 			    conn_stat->port_number,
4075 			    conn_stat->legacy_device_plug_status,
4076 			    conn_stat->displayport_device_plug_status,
4077 			    conn_stat->message_capability_status,
4078 			    conn_stat->input_port,
4079 			    conn_stat->peer_device_type);
4080 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
4081 		const struct drm_dp_resource_status_notify *res_stat =
4082 			&up_req->msg.u.resource_stat;
4083 
4084 		drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
4085 			    res_stat->port_number,
4086 			    res_stat->available_pbn);
4087 	}
4088 
4089 	up_req->hdr = mgr->up_req_recv.initial_hdr;
4090 	mutex_lock(&mgr->up_req_lock);
4091 	list_add_tail(&up_req->next, &mgr->up_req_list);
4092 	mutex_unlock(&mgr->up_req_lock);
4093 	queue_work(system_long_wq, &mgr->up_req_work);
4094 
4095 out:
4096 	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
4097 	return 0;
4098 }
4099 
4100 /**
4101  * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
4102  * @mgr: manager to notify irq for.
4103  * @esi: 4 bytes from SINK_COUNT_ESI
4104  * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
4105  * @handled: whether the hpd interrupt was consumed or not
4106  *
4107  * This should be called from the driver when it detects an HPD IRQ,
4108  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The topology
4109  * manager will process the sideband messages received as indicated in the
4110  * DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the corresponding flags in @ack
4111  * that the driver must use to ack the DP receiver later.
4112  *
4113  * Note that the driver shall also call
4114  * drm_dp_mst_hpd_irq_send_new_request() if @handled is set after calling
4115  * this function, to try to kick off a new request in the queue if the
4116  * previous message transaction is completed.
4117  *
4118  * See also:
4119  * drm_dp_mst_hpd_irq_send_new_request()
4120  */
4121 int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
4122 				    u8 *ack, bool *handled)
4123 {
4124 	int ret = 0;
4125 	int sc = DP_GET_SINK_COUNT(esi[0]);
4126 
4127 	*handled = false;
4128 
4129 	if (sc != mgr->sink_count) {
4130 		mgr->sink_count = sc;
4131 		*handled = true;
4132 	}
4133 
4134 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
4135 		ret = drm_dp_mst_handle_down_rep(mgr);
4136 		*handled = true;
4137 		ack[1] |= DP_DOWN_REP_MSG_RDY;
4138 	}
4139 
4140 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
4141 		ret |= drm_dp_mst_handle_up_req(mgr);
4142 		*handled = true;
4143 		ack[1] |= DP_UP_REQ_MSG_RDY;
4144 	}
4145 
4146 	return ret;
4147 }
4148 EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
4149 
4150 /**
4151  * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
4152  * @mgr: manager to notify irq for.
4153  *
4154  * This should be called from the driver when the MST IRQ event has been
4155  * handled and acked. Note that a new down request should only be sent when
4156  * the previous message transaction is completed; the source is not supposed
4157  * to generate interleaved message transactions.
4158  */
4159 void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
4160 {
4161 	struct drm_dp_sideband_msg_tx *txmsg;
4162 	bool kick = true;
4163 
4164 	mutex_lock(&mgr->qlock);
4165 	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
4166 					 struct drm_dp_sideband_msg_tx, next);
4167 	/* If the last transaction is not completed yet */
4168 	if (!txmsg ||
4169 	    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
4170 	    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
4171 		kick = false;
4172 	mutex_unlock(&mgr->qlock);
4173 
4174 	if (kick)
4175 		drm_dp_mst_kick_tx(mgr);
4176 }
4177 EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
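
/*
 * A sketch of the handle/ack/kick sequence described above, loosely following
 * what drivers do from their HPD IRQ handlers. Reading the ESI vector from
 * DP_SINK_COUNT_ESI and acking via the DEVICE_SERVICE_IRQ_VECTOR_ESI0 offset
 * is one possible arrangement, not the only one.
 */
static void example_handle_mst_irq(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[4], ack[4] = {};
	bool handled;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
		return;

	drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
	if (!handled)
		return;

	/* Ack the serviced events back to the DP receiver */
	drm_dp_dpcd_writeb(mgr->aux, DP_SINK_COUNT_ESI + 1, ack[1]);

	/* Previous transaction done and acked: try to send the next one */
	drm_dp_mst_hpd_irq_send_new_request(mgr);
}
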
4178 /**
4179  * drm_dp_mst_detect_port() - get connection status for an MST port
4180  * @connector: DRM connector for this port
4181  * @ctx: The acquisition context to use for grabbing locks
4182  * @mgr: manager for this port
4183  * @port: pointer to a port
4184  *
4185  * This returns the current connection state for a port.
4186  */
4187 int
4188 drm_dp_mst_detect_port(struct drm_connector *connector,
4189 		       struct drm_modeset_acquire_ctx *ctx,
4190 		       struct drm_dp_mst_topology_mgr *mgr,
4191 		       struct drm_dp_mst_port *port)
4192 {
4193 	int ret;
4194 
4195 	/* we need to search for the port in the mgr in case it's gone */
4196 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
4197 	if (!port)
4198 		return connector_status_disconnected;
4199 
4200 	ret = drm_modeset_lock(&mgr->base.lock, ctx);
4201 	if (ret)
4202 		goto out;
4203 
4204 	ret = connector_status_disconnected;
4205 
4206 	if (!port->ddps)
4207 		goto out;
4208 
4209 	switch (port->pdt) {
4210 	case DP_PEER_DEVICE_NONE:
4211 		break;
4212 	case DP_PEER_DEVICE_MST_BRANCHING:
4213 		if (!port->mcs)
4214 			ret = connector_status_connected;
4215 		break;
4216 
4217 	case DP_PEER_DEVICE_SST_SINK:
4218 		ret = connector_status_connected;
4219 		/* for logical ports - cache the EDID */
4220 		if (drm_dp_mst_port_is_logical(port) && !port->cached_edid)
4221 			port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
4222 		break;
4223 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
4224 		if (port->ldps)
4225 			ret = connector_status_connected;
4226 		break;
4227 	}
4228 out:
4229 	drm_dp_mst_topology_put_port(port);
4230 	return ret;
4231 }
4232 EXPORT_SYMBOL(drm_dp_mst_detect_port);
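
/*
 * A sketch of wiring this into &drm_connector_helper_funcs.detect_ctx;
 * example_mgr_for()/example_port_for() stand in for however the driver finds
 * the manager and port from its connector.
 */
static int example_mst_detect_ctx(struct drm_connector *connector,
				  struct drm_modeset_acquire_ctx *ctx,
				  bool force)
{
	return drm_dp_mst_detect_port(connector, ctx,
				      example_mgr_for(connector),
				      example_port_for(connector));
}
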
4233 
4234 /**
4235  * drm_dp_mst_edid_read() - get EDID for an MST port
4236  * @connector: toplevel connector to get EDID for
4237  * @mgr: manager for this port
4238  * @port: unverified pointer to a port.
4239  *
4240  * This returns an EDID for the port connected to a connector.
4241  * It validates that the pointer still exists so the caller doesn't require a
4242  * reference.
4243  */
4244 const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
4245 					    struct drm_dp_mst_topology_mgr *mgr,
4246 					    struct drm_dp_mst_port *port)
4247 {
4248 	const struct drm_edid *drm_edid;
4249 
4250 	/* we need to search for the port in the mgr in case it's gone */
4251 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
4252 	if (!port)
4253 		return NULL;
4254 
4255 	if (port->cached_edid)
4256 		drm_edid = drm_edid_dup(port->cached_edid);
4257 	else
4258 		drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
4259 
4260 	drm_dp_mst_topology_put_port(port);
4261 
4262 	return drm_edid;
4263 }
4264 EXPORT_SYMBOL(drm_dp_mst_edid_read);
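
/*
 * A sketch of a &drm_connector_helper_funcs.get_modes implementation built on
 * drm_dp_mst_edid_read() and the struct drm_edid connector helpers;
 * example_mgr_for()/example_port_for() are hypothetical driver lookups.
 */
static int example_mst_get_modes(struct drm_connector *connector)
{
	const struct drm_edid *drm_edid;
	int num_modes;

	drm_edid = drm_dp_mst_edid_read(connector,
					example_mgr_for(connector),
					example_port_for(connector));
	/* Both helpers cope with a NULL EDID (nothing connected/read failed) */
	drm_edid_connector_update(connector, drm_edid);
	num_modes = drm_edid_connector_add_modes(connector);
	drm_edid_free(drm_edid);

	return num_modes;
}
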
4265 
4266 /**
4267  * drm_dp_mst_get_edid() - get EDID for an MST port
4268  * @connector: toplevel connector to get EDID for
4269  * @mgr: manager for this port
4270  * @port: unverified pointer to a port.
4271  *
4272  * This function is deprecated; please use drm_dp_mst_edid_read() instead.
4273  *
4274  * This returns an EDID for the port connected to a connector.
4275  * It validates that the pointer still exists so the caller doesn't require a
4276  * reference.
4277  */
4278 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
4279 				 struct drm_dp_mst_topology_mgr *mgr,
4280 				 struct drm_dp_mst_port *port)
4281 {
4282 	const struct drm_edid *drm_edid;
4283 	struct edid *edid;
4284 
4285 	drm_edid = drm_dp_mst_edid_read(connector, mgr, port);
4286 
4287 	edid = drm_edid_duplicate(drm_edid_raw(drm_edid));
4288 
4289 	drm_edid_free(drm_edid);
4290 
4291 	return edid;
4292 }
4293 EXPORT_SYMBOL(drm_dp_mst_get_edid);
4294 
4295 /**
4296  * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
4297  * @state: global atomic state
4298  * @mgr: MST topology manager for the port
4299  * @port: port to find time slots for
4300  * @pbn: bandwidth required for the mode in PBN
4301  *
4302  * Allocates time slots to @port, replacing any previous time slot allocations it may
4303  * have had. Any atomic drivers which support MST must call this function in
4304  * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
4305  * change the current time slot allocation for the new state, and ensure the MST
4306  * atomic state is added whenever the state of payloads in the topology changes.
4307  *
4308  * Allocations set by this function are not checked against the bandwidth
4309  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4310  *
4311  * Additionally, it is OK to call this function multiple times on the same
4312  * @port as needed. It is not OK, however, to call this function and
4313  * drm_dp_atomic_release_time_slots() in the same atomic check phase.
4314  *
4315  * See also:
4316  * drm_dp_atomic_release_time_slots()
4317  * drm_dp_mst_atomic_check()
4318  *
4319  * Returns:
4320  * Total slots in the atomic state assigned for this port, or a negative error
4321  * code if the port no longer exists
4322  */
4323 int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
4324 				  struct drm_dp_mst_topology_mgr *mgr,
4325 				  struct drm_dp_mst_port *port, int pbn)
4326 {
4327 	struct drm_dp_mst_topology_state *topology_state;
4328 	struct drm_dp_mst_atomic_payload *payload = NULL;
4329 	struct drm_connector_state *conn_state;
4330 	int prev_slots = 0, prev_bw = 0, req_slots;
4331 
4332 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4333 	if (IS_ERR(topology_state))
4334 		return PTR_ERR(topology_state);
4335 
4336 	conn_state = drm_atomic_get_new_connector_state(state, port->connector);
4337 	topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc);
4338 
4339 	/* Find the current allocation for this port, if any */
4340 	payload = drm_atomic_get_mst_payload_state(topology_state, port);
4341 	if (payload) {
4342 		prev_slots = payload->time_slots;
4343 		prev_bw = payload->pbn;
4344 
4345 		/*
4346 		 * This should never happen, unless the driver tries
4347 		 * releasing and allocating the same timeslot allocation,
4348 		 * which is an error
4349 		 */
4350 		if (drm_WARN_ON(mgr->dev, payload->delete)) {
4351 			drm_err(mgr->dev,
4352 				"cannot allocate and release time slots on [MST PORT:%p] in the same state\n",
4353 				port);
4354 			return -EINVAL;
4355 		}
4356 	}
4357 
4358 	req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full);
4359 
4360 	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
4361 		       port->connector->base.id, port->connector->name,
4362 		       port, prev_slots, req_slots);
4363 	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4364 		       port->connector->base.id, port->connector->name,
4365 		       port, prev_bw, pbn);
4366 
4367 	/* Add the new allocation to the state, note the VCPI isn't assigned until the end */
4368 	if (!payload) {
4369 		payload = kzalloc(sizeof(*payload), GFP_KERNEL);
4370 		if (!payload)
4371 			return -ENOMEM;
4372 
4373 		drm_dp_mst_get_port_malloc(port);
4374 		payload->port = port;
4375 		payload->vc_start_slot = -1;
4376 		payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE;
4377 		list_add(&payload->next, &topology_state->payloads);
4378 	}
4379 	payload->time_slots = req_slots;
4380 	payload->pbn = pbn;
4381 
4382 	return req_slots;
4383 }
4384 EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
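
/*
 * A sketch of the encoder-side atomic_check usage: compute the stream's PBN
 * and (re)allocate its time slots. The flat 24 bpp figure is a placeholder;
 * note that drm_dp_calc_pbn_mode() takes bpp in .4 fixed point, hence << 4.
 */
static int example_encoder_atomic_check(struct drm_atomic_state *state,
					struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					const struct drm_crtc_state *crtc_state)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24 << 4);

	slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;

	/*
	 * The driver keeps @slots for commit time; the actual bandwidth check
	 * happens later in drm_dp_mst_atomic_check().
	 */
	return 0;
}
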
4385 
4386 /**
4387  * drm_dp_atomic_release_time_slots() - Release allocated time slots
4388  * @state: global atomic state
4389  * @mgr: MST topology manager for the port
4390  * @port: The port to release the time slots from
4391  *
4392  * Releases any time slots that have been allocated to a port in the atomic
4393  * state. Any atomic drivers which support MST must call this function
4394  * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
4395  * This helper will check whether time slots would be released by the new state and
4396  * respond accordingly, along with ensuring the MST state is always added to the
4397  * atomic state whenever a new state would modify the state of payloads on the
4398  * topology.
4399  *
4400  * It is OK to call this even if @port has been removed from the system.
4401  * Additionally, it is OK to call this function multiple times on the same
4402  * @port as needed. It is not OK, however, to call this function and
4403  * drm_dp_atomic_find_time_slots() on the same @port in a single atomic check
4404  * phase.
4405  *
4406  * See also:
4407  * drm_dp_atomic_find_time_slots()
4408  * drm_dp_mst_atomic_check()
4409  *
4410  * Returns:
4411  * 0 on success, negative error code otherwise
4412  */
4413 int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
4414 				     struct drm_dp_mst_topology_mgr *mgr,
4415 				     struct drm_dp_mst_port *port)
4416 {
4417 	struct drm_dp_mst_topology_state *topology_state;
4418 	struct drm_dp_mst_atomic_payload *payload;
4419 	struct drm_connector_state *old_conn_state, *new_conn_state;
4420 	bool update_payload = true;
4421 
4422 	old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);
4423 	if (!old_conn_state->crtc)
4424 		return 0;
4425 
4426 	/* If the CRTC isn't disabled by this state, don't release its payload */
4427 	new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);
4428 	if (new_conn_state->crtc) {
4429 		struct drm_crtc_state *crtc_state =
4430 			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
4431 
4432 		/* No modeset means no payload changes, so it's safe to not pull in the MST state */
4433 		if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))
4434 			return 0;
4435 
4436 		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
4437 			update_payload = false;
4438 	}
4439 
4440 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4441 	if (IS_ERR(topology_state))
4442 		return PTR_ERR(topology_state);
4443 
4444 	topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
4445 	if (!update_payload)
4446 		return 0;
4447 
4448 	payload = drm_atomic_get_mst_payload_state(topology_state, port);
4449 	if (WARN_ON(!payload)) {
4450 		drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
4451 			port, &topology_state->base);
4452 		return -EINVAL;
4453 	}
4454 
4455 	if (new_conn_state->crtc)
4456 		return 0;
4457 
4458 	drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
4459 	if (!payload->delete) {
4460 		payload->pbn = 0;
4461 		payload->delete = true;
4462 		topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
4463 	}
4464 
4465 	return 0;
4466 }
4467 EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);
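
/*
 * The connector-side counterpart sketch: the unconditional release call the
 * documentation above asks for, from a hypothetical
 * &drm_connector_helper_funcs.atomic_check implementation.
 */
static int example_connector_atomic_check(struct drm_connector *connector,
					  struct drm_atomic_state *state)
{
	return drm_dp_atomic_release_time_slots(state,
						example_mgr_for(connector),
						example_port_for(connector));
}
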
4468 
4469 /**
4470  * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
4471  * @state: global atomic state
4472  *
4473  * This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs
4474  * currently assigned to an MST topology. Drivers must call this hook from their
4475  * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
4476  *
4477  * Returns:
4478  * 0 if all CRTC commits were retrieved successfully, negative error code otherwise
4479  */
4480 int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
4481 {
4482 	struct drm_dp_mst_topology_mgr *mgr;
4483 	struct drm_dp_mst_topology_state *mst_state;
4484 	struct drm_crtc *crtc;
4485 	struct drm_crtc_state *crtc_state;
4486 	int i, j, commit_idx, num_commit_deps;
4487 
4488 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
4489 		if (!mst_state->pending_crtc_mask)
4490 			continue;
4491 
4492 		num_commit_deps = hweight32(mst_state->pending_crtc_mask);
4493 		mst_state->commit_deps = kmalloc_array(num_commit_deps,
4494 						       sizeof(*mst_state->commit_deps), GFP_KERNEL);
4495 		if (!mst_state->commit_deps)
4496 			return -ENOMEM;
4497 		mst_state->num_commit_deps = num_commit_deps;
4498 
4499 		commit_idx = 0;
4500 		for_each_new_crtc_in_state(state, crtc, crtc_state, j) {
4501 			if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) {
4502 				mst_state->commit_deps[commit_idx++] =
4503 					drm_crtc_commit_get(crtc_state->commit);
4504 			}
4505 		}
4506 	}
4507 
4508 	return 0;
4509 }
4510 EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
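
/*
 * A sketch of the required wiring: the helper plugs straight into
 * &drm_mode_config_helper_funcs.atomic_commit_setup (any additional
 * driver-specific setup would chain after it).
 */
static const struct drm_mode_config_helper_funcs example_mode_config_helpers = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
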
4511 
4512 /**
4513  * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
4514  * prepare new MST state for commit
4515  * @state: global atomic state
4516  *
4517  * Goes through any MST topologies in this atomic state, and waits for any pending commits which
4518  * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
4519  * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
4520  * with each other by forcing them to be executed sequentially in situations where the only resources
4521  * the modeset objects in these commits share are an MST topology.
4522  *
4523  * This function also prepares the new MST state for commit by performing some state preparation
4524  * which can't be done until this point, such as reading back the final VC start slots (which are
4525  * determined at commit-time) from the previous state.
4526  *
4527  * All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(),
4528  * or whatever their equivalent of that is.
4529  */
4530 void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
4531 {
4532 	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
4533 	struct drm_dp_mst_topology_mgr *mgr;
4534 	struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
4535 	int i, j, ret;
4536 
4537 	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
4538 		for (j = 0; j < old_mst_state->num_commit_deps; j++) {
4539 			ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
4540 			if (ret < 0)
4541 				drm_err(state->dev, "Failed to wait for %s: %d\n",
4542 					old_mst_state->commit_deps[j]->crtc->name, ret);
4543 		}
4544 
4545 		/* Now that previous state is committed, it's safe to copy over the start slot
4546 		 * and allocation status assignments
4547 		 */
4548 		list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
4549 			if (old_payload->delete)
4550 				continue;
4551 
4552 			new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
4553 								       old_payload->port);
4554 			new_payload->vc_start_slot = old_payload->vc_start_slot;
4555 			new_payload->payload_allocation_status =
4556 							old_payload->payload_allocation_status;
4557 		}
4558 	}
4559 }
4560 EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
4561 
4562 /**
4563  * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
4564  * in SST mode
4565  * @new_conn_state: The new connector state of the &drm_connector
4566  * @mgr: The MST topology manager for the &drm_connector
4567  *
4568  * Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to
4569  * serialize non-blocking commits happening on the real DP connector of an MST topology switching
4570  * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
4571  * MST topology will never share the same &drm_encoder.
4572  *
4573  * This function takes care of this serialization issue, by checking a root MST connector's atomic
4574  * state to determine if it is about to have a modeset - and then pulling in the MST topology state
4575  * if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask.
4576  *
4577  * Drivers implementing MST must call this function from the
4578  * &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of
4579  * driving MST sinks.
4580  *
4581  * Returns:
4582  * 0 on success, negative error code otherwise
4583  */
4584 int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
4585 				      struct drm_dp_mst_topology_mgr *mgr)
4586 {
4587 	struct drm_atomic_state *state = new_conn_state->state;
4588 	struct drm_connector_state *old_conn_state =
4589 		drm_atomic_get_old_connector_state(state, new_conn_state->connector);
4590 	struct drm_crtc_state *crtc_state;
4591 	struct drm_dp_mst_topology_state *mst_state = NULL;
4592 
4593 	if (new_conn_state->crtc) {
4594 		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
4595 		if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
4596 			mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4597 			if (IS_ERR(mst_state))
4598 				return PTR_ERR(mst_state);
4599 
4600 			mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);
4601 		}
4602 	}
4603 
4604 	if (old_conn_state->crtc) {
4605 		crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);
4606 		if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
4607 			if (!mst_state) {
4608 				mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4609 				if (IS_ERR(mst_state))
4610 					return PTR_ERR(mst_state);
4611 			}
4612 
4613 			mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
4614 		}
4615 	}
4616 
4617 	return 0;
4618 }
4619 EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);
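
/*
 * A sketch of calling this from the physical DP connector's atomic_check;
 * example_mgr_for() is a placeholder for the driver's manager lookup.
 */
static int example_root_dp_atomic_check(struct drm_connector *connector,
					struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state =
		drm_atomic_get_new_connector_state(state, connector);

	return drm_dp_mst_root_conn_atomic_check(conn_state,
						 example_mgr_for(connector));
}
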
4620 
4621 /**
4622  * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
4623  * @mst_state: mst_state to update
4624  * @link_encoding_cap: the encoding format on the link
4625  */
4626 void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
4627 {
4628 	if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
4629 		mst_state->total_avail_slots = 64;
4630 		mst_state->start_slot = 0;
4631 	} else {
4632 		mst_state->total_avail_slots = 63;
4633 		mst_state->start_slot = 1;
4634 	}
4635 
4636 	DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
4637 		      (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
4638 		      mst_state);
4639 }
4640 EXPORT_SYMBOL(drm_dp_mst_update_slots);
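
/*
 * A sketch of selecting the slot layout from the trained link rate, assuming
 * the driver tracks that rate itself: UHBR rates imply 128b/132b channel
 * coding, everything else is 8b/10b.
 */
static void example_update_mst_slots(struct drm_dp_mst_topology_state *mst_state,
				     int link_rate)
{
	u8 link_coding_cap = drm_dp_is_uhbr_rate(link_rate) ?
		DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;

	drm_dp_mst_update_slots(mst_state, link_coding_cap);
}
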
4641 
4642 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4643 				     int id, u8 start_slot, u8 num_slots)
4644 {
4645 	u8 payload_alloc[3], status;
4646 	int ret;
4647 	int retries = 0;
4648 
4649 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4650 			   DP_PAYLOAD_TABLE_UPDATED);
4651 
4652 	payload_alloc[0] = id;
4653 	payload_alloc[1] = start_slot;
4654 	payload_alloc[2] = num_slots;
4655 
4656 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4657 	if (ret != 3) {
4658 		drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
4659 		goto fail;
4660 	}
4661 
4662 retry:
4663 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4664 	if (ret < 0) {
4665 		drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
4666 		goto fail;
4667 	}
4668 
4669 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4670 		retries++;
4671 		if (retries < 20) {
4672 			usleep_range(10000, 20000);
4673 			goto retry;
4674 		}
4675 		drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
4676 			    status);
4677 		ret = -EINVAL;
4678 		goto fail;
4679 	}
4680 	ret = 0;
4681 fail:
4682 	return ret;
4683 }
4684 
4685 static int do_get_act_status(struct drm_dp_aux *aux)
4686 {
4687 	int ret;
4688 	u8 status;
4689 
4690 	ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4691 	if (ret < 0)
4692 		return ret;
4693 
4694 	return status;
4695 }
4696 
4697 /**
4698  * drm_dp_check_act_status() - Polls for ACT handled status.
4699  * @mgr: manager to use
4700  *
4701  * Tries waiting for the MST hub to finish updating its payload table by
4702  * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
4703  * take that long).
4704  *
4705  * Returns:
4706  * 0 if the ACT was handled in time, negative error code on failure.
4707  */
4708 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4709 {
4710 	/*
4711 	 * There doesn't seem to be any recommended retry count or timeout in
4712 	 * the MST specification. Since some hubs have been observed to take
4713 	 * over 1 second to update their payload allocations under certain
4714 	 * conditions, we use a rather large timeout value.
4715 	 */
4716 	const int timeout_ms = 3000;
4717 	int ret, status;
4718 
4719 	ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
4720 				 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
4721 				 200, timeout_ms * USEC_PER_MSEC);
4722 	if (ret < 0 && status >= 0) {
4723 		drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
4724 			timeout_ms, status);
4725 		return -EINVAL;
4726 	} else if (status < 0) {
4727 		/*
4728 		 * Failure here isn't unexpected - the hub may have
4729 		 * just been unplugged
4730 		 */
4731 		drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
4732 		return status;
4733 	}
4734 
4735 	return 0;
4736 }
4737 EXPORT_SYMBOL(drm_dp_check_act_status);
4738 
4739 /**
4740  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4741  * @clock: dot clock
4742  * @bpp: bpp as .4 binary fixed point
4743  *
4744  * This uses the formula in the spec to calculate the PBN value for a mode.
4745  */
4746 int drm_dp_calc_pbn_mode(int clock, int bpp)
4747 {
4748 	/*
4749 	 * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen based on a
4750 	 * common multiplier to render an integer PBN for all link rate/lane
4751 	 * count combinations.
4752 	 * Calculate:
4753 	 * peak_kbps = clock * bpp / 16
4754 	 * peak_kbps *= SSC overhead / 1000000
4755 	 * peak_kbps /= 8    convert to Kbytes
4756 	 * peak_kBps *= (64/54) / 1000    convert to PBN
4757 	 */
4758 	/*
4759 	 * TODO: Use the actual link and mode parameters to calculate
4760 	 * the overhead. For now it's assumed that these are
4761 	 * 4 link lanes, 4096 hactive pixels, which don't add any
4762 	 * significant data padding overhead and that there is no DSC
4763 	 * or FEC overhead.
4764 	 */
4765 	int overhead = drm_dp_bw_overhead(4, 4096, 0, bpp,
4766 					  DRM_DP_BW_OVERHEAD_MST |
4767 					  DRM_DP_BW_OVERHEAD_SSC_REF_CLK);
4768 
4769 	return DIV64_U64_ROUND_UP(mul_u32_u32(clock * bpp, 64 * overhead >> 4),
4770 				  1000000ULL * 8 * 54 * 1000);
4771 }
4772 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
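
/*
 * Worked example (approximate): a 1920x1080@60 mode with a 148500 kHz dot
 * clock at 24 bpp has peak_kbps = 148500 * 24 / 8 = 445500 kBps. One PBN is
 * 54/64 MBps = 843.75 kBps, so the raw requirement is 528 PBN; the MST/SSC
 * overhead applied above lifts the returned value a few percent higher.
 * Remember @bpp is .4 fixed point:
 *
 *	pbn = drm_dp_calc_pbn_mode(148500, 24 << 4);
 */
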
4773 
4774 /* we want to kick the TX after we've acked the up/down IRQs. */
4775 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4776 {
4777 	queue_work(system_long_wq, &mgr->tx_work);
4778 }
4779 
4780 /*
4781  * Helper function for parsing DP device types into convenient strings
4782  * for use with dp_mst_topology
4783  */
4784 static const char *pdt_to_string(u8 pdt)
4785 {
4786 	switch (pdt) {
4787 	case DP_PEER_DEVICE_NONE:
4788 		return "NONE";
4789 	case DP_PEER_DEVICE_SOURCE_OR_SST:
4790 		return "SOURCE OR SST";
4791 	case DP_PEER_DEVICE_MST_BRANCHING:
4792 		return "MST BRANCHING";
4793 	case DP_PEER_DEVICE_SST_SINK:
4794 		return "SST SINK";
4795 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
4796 		return "DP LEGACY CONV";
4797 	default:
4798 		return "ERR";
4799 	}
4800 }
4801 
4802 static void drm_dp_mst_dump_mstb(struct seq_file *m,
4803 				 struct drm_dp_mst_branch *mstb)
4804 {
4805 	struct drm_dp_mst_port *port;
4806 	int tabs = mstb->lct;
4807 	char prefix[10];
4808 	int i;
4809 
4810 	for (i = 0; i < tabs && i < (int)sizeof(prefix) - 1; i++)
4811 		prefix[i] = '\t';
4812 	prefix[i] = '\0';
4813 
4814 	seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
4815 	list_for_each_entry(port, &mstb->ports, next) {
4816 		seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
4817 			   prefix,
4818 			   port->port_num,
4819 			   port,
4820 			   port->input ? "input" : "output",
4821 			   pdt_to_string(port->pdt),
4822 			   port->ddps,
4823 			   port->ldps,
4824 			   port->num_sdp_streams,
4825 			   port->num_sdp_stream_sinks,
4826 			   port->fec_capable ? "true" : "false",
4827 			   port->connector);
4828 		if (port->mstb)
4829 			drm_dp_mst_dump_mstb(m, port->mstb);
4830 	}
4831 }
4832 
4833 #define DP_PAYLOAD_TABLE_SIZE		64
4834 
4835 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4836 				  char *buf)
4837 {
4838 	int i;
4839 
4840 	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4841 		if (drm_dp_dpcd_read(mgr->aux,
4842 				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4843 				     &buf[i], 16) != 16)
4844 			return false;
4845 	}
4846 	return true;
4847 }
4848 
4849 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4850 			       struct drm_dp_mst_port *port, char *name,
4851 			       int namelen)
4852 {
4853 	struct edid *mst_edid;
4854 
4855 	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4856 	drm_edid_get_monitor_name(mst_edid, name, namelen);
4857 	kfree(mst_edid);
4858 }
4859 
4860 /**
4861  * drm_dp_mst_dump_topology(): dump topology to seq file.
4862  * @m: seq_file to dump output to
4863  * @mgr: manager to dump current topology for.
4864  *
4865  * helper to dump MST topology to a seq file for debugfs.
4866  */
4867 void drm_dp_mst_dump_topology(struct seq_file *m,
4868 			      struct drm_dp_mst_topology_mgr *mgr)
4869 {
4870 	struct drm_dp_mst_topology_state *state;
4871 	struct drm_dp_mst_atomic_payload *payload;
4872 	int i, ret;
4873 
4874 	static const char *const status[] = {
4875 		"None",
4876 		"Local",
4877 		"DFP",
4878 		"Remote",
4879 	};
4880 
4881 	mutex_lock(&mgr->lock);
4882 	if (mgr->mst_primary)
4883 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4884 
4885 	/* dump VCPIs */
4886 	mutex_unlock(&mgr->lock);
4887 
4888 	ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
4889 	if (ret < 0)
4890 		return;
4891 
4892 	state = to_drm_dp_mst_topology_state(mgr->base.state);
4893 	seq_printf(m, "\n*** Atomic state info ***\n");
4894 	seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
4895 		   state->payload_mask, mgr->max_payloads, state->start_slot,
4896 		   dfixed_trunc(state->pbn_div));
4897 
4898 	seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status |     sink name     |\n");
4899 	for (i = 0; i < mgr->max_payloads; i++) {
4900 		list_for_each_entry(payload, &state->payloads, next) {
4901 			char name[14];
4902 
4903 			if (payload->vcpi != i || payload->delete)
4904 				continue;
4905 
4906 			fetch_monitor_name(mgr, payload->port, name, sizeof(name));
4907 			seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %8s %19s\n",
4908 				   i,
4909 				   payload->port->port_num,
4910 				   payload->vcpi,
4911 				   payload->vc_start_slot,
4912 				   payload->vc_start_slot + payload->time_slots - 1,
4913 				   payload->pbn,
4914 				   payload->dsc_enabled ? "Y" : "N",
4915 				   status[payload->payload_allocation_status],
4916 				   (*name != 0) ? name : "Unknown");
4917 		}
4918 	}
4919 
4920 	seq_printf(m, "\n*** DPCD Info ***\n");
4921 	mutex_lock(&mgr->lock);
4922 	if (mgr->mst_primary) {
4923 		u8 buf[DP_PAYLOAD_TABLE_SIZE];
4924 		int ret;
4925 
4926 		if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {
4927 			seq_printf(m, "dpcd read failed\n");
4928 			goto out;
4929 		}
4930 		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4931 
4932 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4933 		if (ret != 2) {
4934 			seq_printf(m, "faux/mst read failed\n");
4935 			goto out;
4936 		}
4937 		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4938 
4939 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4940 		if (ret != 1) {
4941 			seq_printf(m, "mst ctrl read failed\n");
4942 			goto out;
4943 		}
4944 		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4945 
4946 		/* dump the standard OUI branch header */
4947 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4948 		if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4949 			seq_printf(m, "branch oui read failed\n");
4950 			goto out;
4951 		}
4952 		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4953 
4954 		for (i = 0x3; i < 0x8 && buf[i]; i++)
4955 			seq_printf(m, "%c", buf[i]);
4956 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4957 			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4958 		if (dump_dp_payload_table(mgr, buf))
4959 			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4960 	}
4961 
4962 out:
4963 	mutex_unlock(&mgr->lock);
4964 	drm_modeset_unlock(&mgr->base.lock);
4965 }
4966 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
4967 
4968 static void drm_dp_tx_work(struct work_struct *work)
4969 {
4970 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4971 
4972 	mutex_lock(&mgr->qlock);
4973 	if (!list_empty(&mgr->tx_msg_downq))
4974 		process_single_down_tx_qlock(mgr);
4975 	mutex_unlock(&mgr->qlock);
4976 }
4977 
4978 static inline void
4979 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4980 {
4981 	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4982 
4983 	if (port->connector) {
4984 		drm_connector_unregister(port->connector);
4985 		drm_connector_put(port->connector);
4986 	}
4987 
4988 	drm_dp_mst_put_port_malloc(port);
4989 }
4990 
4991 static inline void
4992 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4993 {
4994 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4995 	struct drm_dp_mst_port *port, *port_tmp;
4996 	struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
4997 	bool wake_tx = false;
4998 
4999 	mutex_lock(&mgr->lock);
5000 	list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
5001 		list_del(&port->next);
5002 		drm_dp_mst_topology_put_port(port);
5003 	}
5004 	mutex_unlock(&mgr->lock);
5005 
5006 	/* drop any tx slot msg */
5007 	mutex_lock(&mstb->mgr->qlock);
5008 	list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
5009 		if (txmsg->dst != mstb)
5010 			continue;
5011 
5012 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
5013 		list_del(&txmsg->next);
5014 		wake_tx = true;
5015 	}
5016 	mutex_unlock(&mstb->mgr->qlock);
5017 
5018 	if (wake_tx)
5019 		wake_up_all(&mstb->mgr->tx_waitq);
5020 
5021 	drm_dp_mst_put_mstb_malloc(mstb);
5022 }
5023 
5024 static void drm_dp_delayed_destroy_work(struct work_struct *work)
5025 {
5026 	struct drm_dp_mst_topology_mgr *mgr =
5027 		container_of(work, struct drm_dp_mst_topology_mgr,
5028 			     delayed_destroy_work);
5029 	bool send_hotplug = false, go_again;
5030 
5031 	/*
5032 	 * Not a regular list traverse as we have to drop the destroy
5033 	 * connector lock before destroying the mstb/port, to avoid AB->BA
5034 	 * ordering between this lock and the config mutex.
5035 	 */
5036 	do {
5037 		go_again = false;
5038 
5039 		for (;;) {
5040 			struct drm_dp_mst_branch *mstb;
5041 
5042 			mutex_lock(&mgr->delayed_destroy_lock);
5043 			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
5044 							struct drm_dp_mst_branch,
5045 							destroy_next);
5046 			if (mstb)
5047 				list_del(&mstb->destroy_next);
5048 			mutex_unlock(&mgr->delayed_destroy_lock);
5049 
5050 			if (!mstb)
5051 				break;
5052 
5053 			drm_dp_delayed_destroy_mstb(mstb);
5054 			go_again = true;
5055 		}
5056 
5057 		for (;;) {
5058 			struct drm_dp_mst_port *port;
5059 
5060 			mutex_lock(&mgr->delayed_destroy_lock);
5061 			port = list_first_entry_or_null(&mgr->destroy_port_list,
5062 							struct drm_dp_mst_port,
5063 							next);
5064 			if (port)
5065 				list_del(&port->next);
5066 			mutex_unlock(&mgr->delayed_destroy_lock);
5067 
5068 			if (!port)
5069 				break;
5070 
5071 			drm_dp_delayed_destroy_port(port);
5072 			send_hotplug = true;
5073 			go_again = true;
5074 		}
5075 	} while (go_again);
5076 
5077 	if (send_hotplug)
5078 		drm_kms_helper_hotplug_event(mgr->dev);
5079 }
5080 
5081 static struct drm_private_state *
5082 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
5083 {
5084 	struct drm_dp_mst_topology_state *state, *old_state =
5085 		to_dp_mst_topology_state(obj->state);
5086 	struct drm_dp_mst_atomic_payload *pos, *payload;
5087 
5088 	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
5089 	if (!state)
5090 		return NULL;
5091 
5092 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
5093 
5094 	INIT_LIST_HEAD(&state->payloads);
5095 	state->commit_deps = NULL;
5096 	state->num_commit_deps = 0;
5097 	state->pending_crtc_mask = 0;
5098 
5099 	list_for_each_entry(pos, &old_state->payloads, next) {
5100 		/* Prune leftover freed timeslot allocations */
5101 		if (pos->delete)
5102 			continue;
5103 
5104 		payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL);
5105 		if (!payload)
5106 			goto fail;
5107 
5108 		drm_dp_mst_get_port_malloc(payload->port);
5109 		list_add(&payload->next, &state->payloads);
5110 	}
5111 
5112 	return &state->base;
5113 
5114 fail:
5115 	list_for_each_entry_safe(pos, payload, &state->payloads, next) {
5116 		drm_dp_mst_put_port_malloc(pos->port);
5117 		kfree(pos);
5118 	}
5119 	kfree(state);
5120 
5121 	return NULL;
5122 }
5123 
5124 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
5125 				     struct drm_private_state *state)
5126 {
5127 	struct drm_dp_mst_topology_state *mst_state =
5128 		to_dp_mst_topology_state(state);
5129 	struct drm_dp_mst_atomic_payload *pos, *tmp;
5130 	int i;
5131 
5132 	list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) {
5133 		/* We only keep references to ports with active payloads */
5134 		if (!pos->delete)
5135 			drm_dp_mst_put_port_malloc(pos->port);
5136 		kfree(pos);
5137 	}
5138 
5139 	for (i = 0; i < mst_state->num_commit_deps; i++)
5140 		drm_crtc_commit_put(mst_state->commit_deps[i]);
5141 
5142 	kfree(mst_state->commit_deps);
5143 	kfree(mst_state);
5144 }
5145 
5146 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
5147 						 struct drm_dp_mst_branch *branch)
5148 {
5149 	while (port->parent) {
5150 		if (port->parent == branch)
5151 			return true;
5152 
5153 		if (port->parent->port_parent)
5154 			port = port->parent->port_parent;
5155 		else
5156 			break;
5157 	}
5158 	return false;
5159 }
5160 
5161 static bool
5162 drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr,
5163 					    struct drm_dp_mst_port *port,
5164 					    struct drm_dp_mst_port *parent)
5165 {
5166 	if (!mgr->mst_primary)
5167 		return false;
5168 
5169 	port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
5170 							     port);
5171 	if (!port)
5172 		return false;
5173 
5174 	if (!parent)
5175 		return true;
5176 
5177 	parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
5178 							       parent);
5179 	if (!parent)
5180 		return false;
5181 
5182 	if (!parent->mstb)
5183 		return false;
5184 
5185 	return drm_dp_mst_port_downstream_of_branch(port, parent->mstb);
5186 }
5187 
5188 /**
5189  * drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port
5190  * @mgr: MST topology manager
5191  * @port: the port being looked up
5192  * @parent: the parent port
5193  *
5194  * The function returns %true if @port is downstream of @parent. If @parent is
5195  * %NULL - denoting the root port - the function returns %true if @port is in
5196  * @mgr's topology.
5197  */
5198 bool
5199 drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
5200 				     struct drm_dp_mst_port *port,
5201 				     struct drm_dp_mst_port *parent)
5202 {
5203 	bool ret;
5204 
5205 	mutex_lock(&mgr->lock);
5206 	ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent);
5207 	mutex_unlock(&mgr->lock);
5208 
5209 	return ret;
5210 }
5211 EXPORT_SYMBOL(drm_dp_mst_port_downstream_of_parent);
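
/*
 * Usage sketch (hypothetical driver code): before accounting the bandwidth
 * of two sinks against a shared branch, a driver can check that one of them
 * actually sits below the other's parent. Passing a NULL parent reduces this
 * to checking that the port is still part of the manager's topology.
 *
 *	if (drm_dp_mst_port_downstream_of_parent(mgr, port, parent_port))
 *		shared_pbn += payload->pbn;
 */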
5212 
5213 static int
5214 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5215 				      struct drm_dp_mst_topology_state *state,
5216 				      struct drm_dp_mst_port **failing_port);
5217 
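/*
 * drm_dp_mst_atomic_check_mstb_bw_limit() and
 * drm_dp_mst_atomic_check_port_bw_limit() recurse into each other while
 * walking the topology: a branch sums the PBN consumed through each of its
 * ports, and a port contributes either its payload's PBN (end device) or
 * the total of its own branch. The first port whose full_pbn limit is
 * exceeded is reported through @failing_port.
 */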
5218 static int
5219 drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
5220 				      struct drm_dp_mst_topology_state *state,
5221 				      struct drm_dp_mst_port **failing_port)
5222 {
5223 	struct drm_dp_mst_atomic_payload *payload;
5224 	struct drm_dp_mst_port *port;
5225 	int pbn_used = 0, ret;
5226 	bool found = false;
5227 
5228 	/* Check that we have at least one port in our state that's downstream
5229 	 * of this branch, otherwise we can skip this branch
5230 	 */
5231 	list_for_each_entry(payload, &state->payloads, next) {
5232 		if (!payload->pbn ||
5233 		    !drm_dp_mst_port_downstream_of_branch(payload->port, mstb))
5234 			continue;
5235 
5236 		found = true;
5237 		break;
5238 	}
5239 	if (!found)
5240 		return 0;
5241 
5242 	if (mstb->port_parent)
5243 		drm_dbg_atomic(mstb->mgr->dev,
5244 			       "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
5245 			       mstb->port_parent->parent, mstb->port_parent, mstb);
5246 	else
5247 		drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
5248 
5249 	list_for_each_entry(port, &mstb->ports, next) {
5250 		ret = drm_dp_mst_atomic_check_port_bw_limit(port, state, failing_port);
5251 		if (ret < 0)
5252 			return ret;
5253 
5254 		pbn_used += ret;
5255 	}
5256 
5257 	return pbn_used;
5258 }
5259 
5260 static int
5261 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5262 				      struct drm_dp_mst_topology_state *state,
5263 				      struct drm_dp_mst_port **failing_port)
5264 {
5265 	struct drm_dp_mst_atomic_payload *payload;
5266 	int pbn_used = 0;
5267 
5268 	if (port->pdt == DP_PEER_DEVICE_NONE)
5269 		return 0;
5270 
5271 	if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
5272 		payload = drm_atomic_get_mst_payload_state(state, port);
5273 		if (!payload)
5274 			return 0;
5275 
5276 		/*
5277 		 * This could happen if the sink deasserted its HPD line, but
5278 		 * the branch device still reports it as attached (PDT != NONE).
5279 		 */
5280 		if (!port->full_pbn) {
5281 			drm_dbg_atomic(port->mgr->dev,
5282 				       "[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
5283 				       port->parent, port);
5284 			*failing_port = port;
5285 			return -EINVAL;
5286 		}
5287 
5288 		pbn_used = payload->pbn;
5289 	} else {
5290 		pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
5291 								 state,
5292 								 failing_port);
5293 		if (pbn_used <= 0)
5294 			return pbn_used;
5295 	}
5296 
5297 	if (pbn_used > port->full_pbn) {
5298 		drm_dbg_atomic(port->mgr->dev,
5299 			       "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
5300 			       port->parent, port, pbn_used, port->full_pbn);
5301 		*failing_port = port;
5302 		return -ENOSPC;
5303 	}
5304 
5305 	drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
5306 		       port->parent, port, pbn_used, port->full_pbn);
5307 
5308 	return pbn_used;
5309 }
5310 
5311 static inline int
5312 drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,
5313 					     struct drm_dp_mst_topology_state *mst_state)
5314 {
5315 	struct drm_dp_mst_atomic_payload *payload;
5316 	int avail_slots = mst_state->total_avail_slots, payload_count = 0;
5317 
5318 	list_for_each_entry(payload, &mst_state->payloads, next) {
		/* Releasing payloads is always OK, even if the port is gone */
5320 		if (payload->delete) {
5321 			drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",
5322 				       payload->port);
5323 			continue;
5324 		}
5325 
5326 		drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",
5327 			       payload->port, payload->time_slots);
5328 
5329 		avail_slots -= payload->time_slots;
5330 		if (avail_slots < 0) {
5331 			drm_dbg_atomic(mgr->dev,
5332 				       "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n",
5333 				       payload->port, mst_state, avail_slots + payload->time_slots);
5334 			return -ENOSPC;
5335 		}
5336 
5337 		if (++payload_count > mgr->max_payloads) {
5338 			drm_dbg_atomic(mgr->dev,
5339 				       "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
5340 				       mgr, mst_state, mgr->max_payloads);
5341 			return -EINVAL;
5342 		}
5343 
		/*
		 * Assign a VCPI: IDs are 1-based, and bit (vcpi - 1) in
		 * payload_mask marks an ID as taken, so ffz() yields the
		 * lowest free one.
		 */
5345 		if (!payload->vcpi) {
5346 			payload->vcpi = ffz(mst_state->payload_mask) + 1;
5347 			drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
5348 				       payload->port, payload->vcpi);
5349 			mst_state->payload_mask |= BIT(payload->vcpi - 1);
5350 		}
5351 	}
5352 
5353 	if (!payload_count)
5354 		mst_state->pbn_div.full = dfixed_const(0);
5355 
5356 	drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
5357 		       mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots,
5358 		       mst_state->total_avail_slots - avail_slots);
5359 
5360 	return 0;
5361 }
5362 
5363 /**
 * drm_dp_mst_add_affected_dsc_crtcs - flag affected CRTCs for a DSC modeset
 * @state: Pointer to the new struct drm_atomic_state
 * @mgr: MST topology manager
 *
 * Whenever there is a change in the MST topology, the DSC configuration
 * has to be recalculated, so we need to trigger a modeset on all affected
 * CRTCs in that topology.
 *
 * See also:
 * drm_dp_mst_atomic_enable_dsc()
 *
 * Returns:
 * 0 on success, or a negative error code on failure.
5375  */
5376 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5377 {
5378 	struct drm_dp_mst_topology_state *mst_state;
5379 	struct drm_dp_mst_atomic_payload *pos;
5380 	struct drm_connector *connector;
5381 	struct drm_connector_state *conn_state;
5382 	struct drm_crtc *crtc;
5383 	struct drm_crtc_state *crtc_state;
5384 
5385 	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5386 
5387 	if (IS_ERR(mst_state))
5388 		return PTR_ERR(mst_state);
5389 
	list_for_each_entry(pos, &mst_state->payloads, next) {
5392 		connector = pos->port->connector;
5393 
5394 		if (!connector)
5395 			return -EINVAL;
5396 
5397 		conn_state = drm_atomic_get_connector_state(state, connector);
5398 
5399 		if (IS_ERR(conn_state))
5400 			return PTR_ERR(conn_state);
5401 
5402 		crtc = conn_state->crtc;
5403 
5404 		if (!crtc)
5405 			continue;
5406 
5407 		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5408 			continue;
5409 
5410 		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5411 
5412 		if (IS_ERR(crtc_state))
5413 			return PTR_ERR(crtc_state);
5414 
5415 		drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5416 			       mgr, crtc);
5417 
5418 		crtc_state->mode_changed = true;
5419 	}
5420 	return 0;
5421 }
5422 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
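
/*
 * Usage sketch (hypothetical driver code): a driver's atomic_check path can
 * call this once it knows payloads on the topology are changing, so every
 * CRTC whose DSC configuration may become stale gets a full modeset:
 *
 *	ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
 *	if (ret)
 *		return ret;
 */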
5423 
5424 /**
 * drm_dp_mst_atomic_enable_dsc - set the DSC enable flag on/off for a port
 * @state: Pointer to the new drm_atomic_state
 * @port: Pointer to the affected MST port
 * @pbn: Newly recalculated bandwidth required for the link with DSC enabled
 * @enable: Boolean flag to enable or disable DSC on the port
 *
 * This function enables DSC on the given port by recalculating its time slot
 * allocation from the provided PBN, and sets the dsc_enabled flag so that we
 * can keep track of which ports have DSC enabled.
 *
 * Returns:
 * The number of time slots allocated to the port, or a negative error code
 * on failure.
5436  */
5437 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5438 				 struct drm_dp_mst_port *port,
5439 				 int pbn, bool enable)
5440 {
5441 	struct drm_dp_mst_topology_state *mst_state;
5442 	struct drm_dp_mst_atomic_payload *payload;
5443 	int time_slots = 0;
5444 
5445 	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5446 	if (IS_ERR(mst_state))
5447 		return PTR_ERR(mst_state);
5448 
5449 	payload = drm_atomic_get_mst_payload_state(mst_state, port);
5450 	if (!payload) {
5451 		drm_dbg_atomic(state->dev,
5452 			       "[MST PORT:%p] Couldn't find payload in mst state %p\n",
5453 			       port, mst_state);
5454 		return -EINVAL;
5455 	}
5456 
5457 	if (payload->dsc_enabled == enable) {
5458 		drm_dbg_atomic(state->dev,
5459 			       "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n",
5460 			       port, enable, payload->time_slots);
5461 		time_slots = payload->time_slots;
5462 	}
5463 
5464 	if (enable) {
5465 		time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
5466 		drm_dbg_atomic(state->dev,
5467 			       "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
5468 			       port, time_slots);
5469 		if (time_slots < 0)
5470 			return -EINVAL;
5471 	}
5472 
5473 	payload->dsc_enabled = enable;
5474 
5475 	return time_slots;
5476 }
5477 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
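
/*
 * Usage sketch (hypothetical driver code): when a mode only fits with
 * compression, recompute the PBN for the compressed stream and flip the
 * DSC flag; a non-negative return value is the new time slot count.
 *
 *	time_slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn, true);
 *	if (time_slots < 0)
 *		return time_slots;
 */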
5478 
5479 /**
5480  * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager
5481  * @state: The global atomic state
5482  * @mgr: Manager to check
5483  * @mst_state: The MST atomic state for @mgr
5484  * @failing_port: Returns the port with a BW limitation
5485  *
5486  * Checks the given MST manager's topology state for an atomic update to ensure
5487  * that it's valid. This includes checking whether there's enough bandwidth to
5488  * support the new timeslot allocations in the atomic update.
5489  *
5490  * Any atomic drivers supporting DP MST must make sure to call this or
5491  * the drm_dp_mst_atomic_check() function after checking the rest of their state
5492  * in their &drm_mode_config_funcs.atomic_check() callback.
5493  *
5494  * See also:
5495  * drm_dp_mst_atomic_check()
5496  * drm_dp_atomic_find_time_slots()
5497  * drm_dp_atomic_release_time_slots()
5498  *
5499  * Returns:
5500  *   - 0 if the new state is valid
5501  *   - %-ENOSPC, if the new state is invalid, because of BW limitation
5502  *         @failing_port is set to:
5503  *
5504  *         - The non-root port where a BW limit check failed
5505  *           with all the ports downstream of @failing_port passing
5506  *           the BW limit check.
 *           The returned port pointer remains valid as long as at
 *           least one payload downstream of it exists.
5509  *         - %NULL if the BW limit check failed at the root port
5510  *           with all the ports downstream of the root port passing
5511  *           the BW limit check.
5512  *
5513  *   - %-EINVAL, if the new state is invalid, because the root port has
5514  *     too many payloads.
5515  */
5516 int drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
5517 				struct drm_dp_mst_topology_mgr *mgr,
5518 				struct drm_dp_mst_topology_state *mst_state,
5519 				struct drm_dp_mst_port **failing_port)
5520 {
5521 	int ret;
5522 
5523 	*failing_port = NULL;
5524 
5525 	if (!mgr->mst_state)
5526 		return 0;
5527 
5528 	mutex_lock(&mgr->lock);
5529 	ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5530 						    mst_state,
5531 						    failing_port);
5532 	mutex_unlock(&mgr->lock);
5533 
5534 	if (ret < 0)
5535 		return ret;
5536 
5537 	return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
5538 }
5539 EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);
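
/*
 * Usage sketch (hypothetical driver code): drivers that want to react to a
 * bandwidth failure - say, by retrying with DSC enabled on the failing
 * subtree - can use the @failing_port output instead of the plain
 * drm_dp_mst_atomic_check():
 *
 *	ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &failing_port);
 *	if (ret == -ENOSPC && failing_port) {
 *		... enable DSC on the streams below failing_port ...
 *		... recompute payloads and run the check again ...
 *	}
 */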
5540 
5541 /**
5542  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5543  * atomic update is valid
5544  * @state: Pointer to the new &struct drm_dp_mst_topology_state
5545  *
5546  * Checks the given topology state for an atomic update to ensure that it's
 * valid, calling drm_dp_mst_atomic_check_mgr() for every MST manager in the
5548  * atomic state. This includes checking whether there's enough bandwidth to
5549  * support the new timeslot allocations in the atomic update.
5550  *
5551  * Any atomic drivers supporting DP MST must make sure to call this after
5552  * checking the rest of their state in their
5553  * &drm_mode_config_funcs.atomic_check() callback.
5554  *
5555  * See also:
5556  * drm_dp_mst_atomic_check_mgr()
5557  * drm_dp_atomic_find_time_slots()
5558  * drm_dp_atomic_release_time_slots()
5559  *
5560  * Returns:
5561  *
5562  * 0 if the new state is valid, negative error code otherwise.
5563  */
5564 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5565 {
5566 	struct drm_dp_mst_topology_mgr *mgr;
5567 	struct drm_dp_mst_topology_state *mst_state;
5568 	int i, ret = 0;
5569 
5570 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5571 		struct drm_dp_mst_port *tmp_port;
5572 
5573 		ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port);
5574 		if (ret)
5575 			break;
5576 	}
5577 
5578 	return ret;
5579 }
5580 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
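
/*
 * Usage sketch (hypothetical driver code): the expected call site is the end
 * of a driver's &drm_mode_config_funcs.atomic_check implementation, once the
 * rest of the state has been validated:
 *
 *	static int example_atomic_check(struct drm_device *dev,
 *					struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */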
5581 
5582 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5583 	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
5584 	.atomic_destroy_state = drm_dp_mst_destroy_state,
5585 };
5586 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5587 
5588 /**
5589  * drm_atomic_get_mst_topology_state: get MST topology state
5590  * @state: global atomic state
5591  * @mgr: MST topology manager, also the private object in this case
5592  *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of
 * an MST topology object.
5596  *
5597  * RETURNS:
5598  *
5599  * The MST topology state or error pointer.
5600  */
5601 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5602 								    struct drm_dp_mst_topology_mgr *mgr)
5603 {
5604 	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5605 }
5606 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
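
/*
 * Usage sketch (hypothetical driver code): the returned state holds the
 * per-port payloads, e.g. when inspecting the allocation for one stream:
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 */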
5607 
5608 /**
5609  * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
5610  * @state: global atomic state
5611  * @mgr: MST topology manager, also the private object in this case
5612  *
5613  * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
 * state vtable so that the private object state returned is that of an MST
5615  * topology object.
5616  *
5617  * Returns:
5618  *
5619  * The old MST topology state, or NULL if there's no topology state for this MST mgr
5620  * in the global atomic state
5621  */
5622 struct drm_dp_mst_topology_state *
5623 drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
5624 				      struct drm_dp_mst_topology_mgr *mgr)
5625 {
5626 	struct drm_private_state *old_priv_state =
5627 		drm_atomic_get_old_private_obj_state(state, &mgr->base);
5628 
5629 	return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
5630 }
5631 EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
5632 
5633 /**
5634  * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
5635  * @state: global atomic state
5636  * @mgr: MST topology manager, also the private object in this case
5637  *
5638  * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
 * state vtable so that the private object state returned is that of an MST
5640  * topology object.
5641  *
5642  * Returns:
5643  *
5644  * The new MST topology state, or NULL if there's no topology state for this MST mgr
5645  * in the global atomic state
5646  */
5647 struct drm_dp_mst_topology_state *
5648 drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
5649 				      struct drm_dp_mst_topology_mgr *mgr)
5650 {
5651 	struct drm_private_state *new_priv_state =
5652 		drm_atomic_get_new_private_obj_state(state, &mgr->base);
5653 
5654 	return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
5655 }
5656 EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
5657 
5658 /**
5659  * drm_dp_mst_topology_mgr_init - initialise a topology manager
5660  * @mgr: manager struct to initialise
5661  * @dev: device providing this structure - for i2c addition.
5662  * @aux: DP helper aux channel to talk to this device
5663  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
5664  * @max_payloads: maximum number of payloads this GPU can source
5665  * @conn_base_id: the connector object ID the MST device is connected to.
5666  *
 * Return: 0 on success, or a negative error code on failure
5668  */
5669 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5670 				 struct drm_device *dev, struct drm_dp_aux *aux,
5671 				 int max_dpcd_transaction_bytes, int max_payloads,
5672 				 int conn_base_id)
5673 {
5674 	struct drm_dp_mst_topology_state *mst_state;
5675 
5676 	mutex_init(&mgr->lock);
5677 	mutex_init(&mgr->qlock);
5678 	mutex_init(&mgr->delayed_destroy_lock);
5679 	mutex_init(&mgr->up_req_lock);
5680 	mutex_init(&mgr->probe_lock);
5681 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5682 	mutex_init(&mgr->topology_ref_history_lock);
5683 	stack_depot_init();
5684 #endif
5685 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
5686 	INIT_LIST_HEAD(&mgr->destroy_port_list);
5687 	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5688 	INIT_LIST_HEAD(&mgr->up_req_list);
5689 
5690 	/*
	 * delayed_destroy_work will be queued on a dedicated WQ, so that any
	 * requeued work will also be flushed when the topology manager is
	 * deinitialized.
5693 	 */
5694 	mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5695 	if (mgr->delayed_destroy_wq == NULL)
5696 		return -ENOMEM;
5697 
5698 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5699 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5700 	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5701 	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5702 	init_waitqueue_head(&mgr->tx_waitq);
5703 	mgr->dev = dev;
5704 	mgr->aux = aux;
5705 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5706 	mgr->max_payloads = max_payloads;
5707 	mgr->conn_base_id = conn_base_id;
5708 
5709 	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5710 	if (mst_state == NULL)
5711 		return -ENOMEM;
5712 
5713 	mst_state->total_avail_slots = 63;
5714 	mst_state->start_slot = 1;
5715 
5716 	mst_state->mgr = mgr;
5717 	INIT_LIST_HEAD(&mst_state->payloads);
5718 
5719 	drm_atomic_private_obj_init(dev, &mgr->base,
5720 				    &mst_state->base,
5721 				    &drm_dp_mst_topology_state_funcs);
5722 
5723 	return 0;
5724 }
5725 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
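
/*
 * Usage sketch (hypothetical driver code): a driver embeds the manager in
 * its own encoder/connector structure and initializes it once its AUX
 * channel is up; the transaction-size and payload limits below are
 * illustrative only. drm_dp_mst_topology_mgr_destroy() undoes this on
 * teardown.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&dig_port->mst_mgr, dev,
 *					   &dig_port->dp_aux, 16, 4,
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 */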
5726 
5727 /**
5728  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5729  * @mgr: manager to destroy
5730  */
5731 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5732 {
5733 	drm_dp_mst_topology_mgr_set_mst(mgr, false);
5734 	flush_work(&mgr->work);
5735 	/* The following will also drain any requeued work on the WQ. */
5736 	if (mgr->delayed_destroy_wq) {
5737 		destroy_workqueue(mgr->delayed_destroy_wq);
5738 		mgr->delayed_destroy_wq = NULL;
5739 	}
5740 	mgr->dev = NULL;
5741 	mgr->aux = NULL;
5742 	drm_atomic_private_obj_fini(&mgr->base);
5743 	mgr->funcs = NULL;
5744 
5745 	mutex_destroy(&mgr->delayed_destroy_lock);
5746 	mutex_destroy(&mgr->qlock);
5747 	mutex_destroy(&mgr->lock);
5748 	mutex_destroy(&mgr->up_req_lock);
5749 	mutex_destroy(&mgr->probe_lock);
5750 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5751 	mutex_destroy(&mgr->topology_ref_history_lock);
5752 #endif
5753 }
5754 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5755 
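/*
 * A DP_REMOTE_I2C_READ sideband request carries at most
 * DP_REMOTE_I2C_READ_MAX_TRANSACTIONS leading write transactions followed by
 * exactly one read, and every transaction length field in the message is a
 * single byte; the checks in remote_i2c_read_ok() mirror those wire-format
 * limits.
 */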
5756 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5757 {
5758 	int i;
5759 
5760 	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5761 		return false;
5762 
5763 	for (i = 0; i < num - 1; i++) {
5764 		if (msgs[i].flags & I2C_M_RD ||
5765 		    msgs[i].len > 0xff)
5766 			return false;
5767 	}
5768 
5769 	return msgs[num - 1].flags & I2C_M_RD &&
5770 		msgs[num - 1].len <= 0xff;
5771 }
5772 
5773 static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
5774 {
5775 	int i;
5776 
5777 	for (i = 0; i < num - 1; i++) {
5778 		if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
5779 		    msgs[i].len > 0xff)
5780 			return false;
5781 	}
5782 
5783 	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
5784 }
5785 
5786 static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
5787 			       struct drm_dp_mst_port *port,
5788 			       struct i2c_msg *msgs, int num)
5789 {
5790 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5791 	unsigned int i;
5792 	struct drm_dp_sideband_msg_req_body msg;
5793 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
5794 	int ret;
5795 
5796 	memset(&msg, 0, sizeof(msg));
5797 	msg.req_type = DP_REMOTE_I2C_READ;
5798 	msg.u.i2c_read.num_transactions = num - 1;
5799 	msg.u.i2c_read.port_number = port->port_num;
5800 	for (i = 0; i < num - 1; i++) {
5801 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5802 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5803 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5804 		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5805 	}
5806 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5807 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5808 
5809 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5810 	if (!txmsg) {
5811 		ret = -ENOMEM;
5812 		goto out;
5813 	}
5814 
5815 	txmsg->dst = mstb;
5816 	drm_dp_encode_sideband_req(&msg, txmsg);
5817 
5818 	drm_dp_queue_down_tx(mgr, txmsg);
5819 
5820 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
5823 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5824 			ret = -EREMOTEIO;
5825 			goto out;
5826 		}
5827 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5828 			ret = -EIO;
5829 			goto out;
5830 		}
5831 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5832 		ret = num;
5833 	}
5834 out:
5835 	kfree(txmsg);
5836 	return ret;
5837 }
5838 
5839 static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
5840 				struct drm_dp_mst_port *port,
5841 				struct i2c_msg *msgs, int num)
5842 {
5843 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5844 	unsigned int i;
5845 	struct drm_dp_sideband_msg_req_body msg;
5846 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
5847 	int ret;
5848 
5849 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5850 	if (!txmsg) {
5851 		ret = -ENOMEM;
5852 		goto out;
5853 	}
5854 	for (i = 0; i < num; i++) {
5855 		memset(&msg, 0, sizeof(msg));
5856 		msg.req_type = DP_REMOTE_I2C_WRITE;
5857 		msg.u.i2c_write.port_number = port->port_num;
5858 		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
5859 		msg.u.i2c_write.num_bytes = msgs[i].len;
5860 		msg.u.i2c_write.bytes = msgs[i].buf;
5861 
5862 		memset(txmsg, 0, sizeof(*txmsg));
5863 		txmsg->dst = mstb;
5864 
5865 		drm_dp_encode_sideband_req(&msg, txmsg);
5866 		drm_dp_queue_down_tx(mgr, txmsg);
5867 
5868 		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5869 		if (ret > 0) {
5870 			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5871 				ret = -EREMOTEIO;
5872 				goto out;
5873 			}
5874 		} else {
5875 			goto out;
5876 		}
5877 	}
5878 	ret = num;
5879 out:
5880 	kfree(txmsg);
5881 	return ret;
5882 }
5883 
5884 /* I2C device */
5885 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
5886 			       struct i2c_msg *msgs, int num)
5887 {
5888 	struct drm_dp_aux *aux = adapter->algo_data;
5889 	struct drm_dp_mst_port *port =
5890 		container_of(aux, struct drm_dp_mst_port, aux);
5891 	struct drm_dp_mst_branch *mstb;
5892 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5893 	int ret;
5894 
5895 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5896 	if (!mstb)
5897 		return -EREMOTEIO;
5898 
5899 	if (remote_i2c_read_ok(msgs, num)) {
5900 		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
5901 	} else if (remote_i2c_write_ok(msgs, num)) {
5902 		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
5903 	} else {
5904 		drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
5905 		ret = -EIO;
5906 	}
5907 
5908 	drm_dp_mst_topology_put_mstb(mstb);
5909 	return ret;
5910 }
5911 
5912 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5913 {
5914 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5915 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5916 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5917 	       I2C_FUNC_10BIT_ADDR;
5918 }
5919 
5920 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5921 	.functionality = drm_dp_mst_i2c_functionality,
5922 	.master_xfer = drm_dp_mst_i2c_xfer,
5923 };
5924 
5925 /**
5926  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5927  * @port: The port to add the I2C bus on
5928  *
5929  * Returns 0 on success or a negative error code on failure.
5930  */
5931 static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
5932 {
5933 	struct drm_dp_aux *aux = &port->aux;
5934 	struct device *parent_dev = port->mgr->dev->dev;
5935 
5936 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
5937 	aux->ddc.algo_data = aux;
5938 	aux->ddc.retries = 3;
5939 
5940 	aux->ddc.owner = THIS_MODULE;
5941 	/* FIXME: set the kdev of the port's connector as parent */
5942 	aux->ddc.dev.parent = parent_dev;
5943 	aux->ddc.dev.of_node = parent_dev->of_node;
5944 
5945 	strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
5946 		sizeof(aux->ddc.name));
5947 
5948 	return i2c_add_adapter(&aux->ddc);
5949 }
5950 
5951 /**
5952  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5953  * @port: The port to remove the I2C bus from
5954  */
5955 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
5956 {
5957 	i2c_del_adapter(&port->aux.ddc);
5958 }
5959 
5960 /**
5961  * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5962  * @port: The port to check
5963  *
5964  * A single physical MST hub object can be represented in the topology
5965  * by multiple branches, with virtual ports between those branches.
5966  *
 * As of DP 1.4, an MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
5970  *
5971  * May acquire mgr->lock
5972  *
5973  * Returns:
5974  * true if the port is a virtual DP peer device, false otherwise
5975  */
5976 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5977 {
5978 	struct drm_dp_mst_port *downstream_port;
5979 
5980 	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5981 		return false;
5982 
5983 	/* Virtual DP Sink (Internal Display Panel) */
5984 	if (drm_dp_mst_port_is_logical(port))
5985 		return true;
5986 
5987 	/* DP-to-HDMI Protocol Converter */
5988 	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5989 	    !port->mcs &&
5990 	    port->ldps)
5991 		return true;
5992 
5993 	/* DP-to-DP */
5994 	mutex_lock(&port->mgr->lock);
5995 	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5996 	    port->mstb &&
5997 	    port->mstb->num_ports == 2) {
5998 		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5999 			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
6000 			    !downstream_port->input) {
6001 				mutex_unlock(&port->mgr->lock);
6002 				return true;
6003 			}
6004 		}
6005 	}
6006 	mutex_unlock(&port->mgr->lock);
6007 
6008 	return false;
6009 }
6010 
6011 /**
6012  * drm_dp_mst_aux_for_parent() - Get the AUX device for an MST port's parent
6013  * @port: MST port whose parent's AUX device is returned
6014  *
6015  * Return the AUX device for @port's parent or NULL if port's parent is the
6016  * root port.
6017  */
6018 struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port)
6019 {
6020 	if (!port->parent || !port->parent->port_parent)
6021 		return NULL;
6022 
6023 	return &port->parent->port_parent->aux;
6024 }
6025 EXPORT_SYMBOL(drm_dp_mst_aux_for_parent);
6026 
6027 /**
6028  * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
6029  * @port: The port to check. A leaf of the MST tree with an attached display.
6030  *
6031  * Depending on the situation, DSC may be enabled via the endpoint aux,
6032  * the immediately upstream aux, or the connector's physical aux.
6033  *
6034  * This is both the correct aux to read DSC_CAPABILITY and the
6035  * correct aux to write DSC_ENABLED.
6036  *
6037  * This operation can be expensive (up to four aux reads), so
6038  * the caller should cache the return.
6039  *
6040  * Returns:
6041  * NULL if DSC cannot be enabled on this port, otherwise the aux device
6042  */
6043 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
6044 {
6045 	struct drm_dp_mst_port *immediate_upstream_port;
6046 	struct drm_dp_aux *immediate_upstream_aux;
6047 	struct drm_dp_mst_port *fec_port;
6048 	struct drm_dp_desc desc = {};
6049 	u8 endpoint_fec;
6050 	u8 endpoint_dsc;
6051 
6052 	if (!port)
6053 		return NULL;
6054 
6055 	if (port->parent->port_parent)
6056 		immediate_upstream_port = port->parent->port_parent;
6057 	else
6058 		immediate_upstream_port = NULL;
6059 
6060 	fec_port = immediate_upstream_port;
6061 	while (fec_port) {
6062 		/*
6063 		 * Each physical link (i.e. not a virtual port) between the
6064 		 * output and the primary device must support FEC
6065 		 */
6066 		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
6067 		    !fec_port->fec_capable)
6068 			return NULL;
6069 
6070 		fec_port = fec_port->parent->port_parent;
6071 	}
6072 
6073 	/* DP-to-DP peer device */
6074 	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
6075 		u8 upstream_dsc;
6076 
6077 		if (drm_dp_dpcd_read(&port->aux,
6078 				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
6079 			return NULL;
6080 		if (drm_dp_dpcd_read(&port->aux,
6081 				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
6082 			return NULL;
6083 		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
6084 				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
6085 			return NULL;
6086 
		/* Endpoint decompression with DP-to-DP peer device */
6088 		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
6089 		    (endpoint_fec & DP_FEC_CAPABLE) &&
6090 		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
6091 			port->passthrough_aux = &immediate_upstream_port->aux;
6092 			return &port->aux;
6093 		}
6094 
6095 		/* Virtual DPCD decompression with DP-to-DP peer device */
6096 		return &immediate_upstream_port->aux;
6097 	}
6098 
6099 	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
6100 	if (drm_dp_mst_is_virtual_dpcd(port))
6101 		return &port->aux;
6102 
6103 	/*
6104 	 * Synaptics quirk
6105 	 * Applies to ports for which:
6106 	 * - Physical aux has Synaptics OUI
6107 	 * - DPv1.4 or higher
6108 	 * - Port is on primary branch device
6109 	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
6110 	 */
6111 	if (immediate_upstream_port)
6112 		immediate_upstream_aux = &immediate_upstream_port->aux;
6113 	else
6114 		immediate_upstream_aux = port->mgr->aux;
6115 
6116 	if (drm_dp_read_desc(immediate_upstream_aux, &desc, true))
6117 		return NULL;
6118 
6119 	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
6120 		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
6121 
6122 		if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
6123 			return NULL;
6124 
6125 		if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
6126 		    ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
6127 		    ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
6128 		     != DP_DWN_STRM_PORT_TYPE_ANALOG)))
6129 			return immediate_upstream_aux;
6130 	}
6131 
6132 	/*
	 * The check below verifies that the MST sink connected to the GPU is
	 * itself capable of DSC - for that, the endpoint needs to be both
	 * DSC and FEC capable.
6137 	 */
	if (drm_dp_dpcd_read(&port->aux,
			     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
		return NULL;
	if (drm_dp_dpcd_read(&port->aux,
			     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	    (endpoint_fec & DP_FEC_CAPABLE))
6146 		return &port->aux;
6147 
6148 	return NULL;
6149 }
6150 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
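
/*
 * Usage sketch (hypothetical driver code): resolve and cache the DSC aux at
 * detect time, then confirm decompression support through it.
 *
 *	u8 dsc;
 *
 *	connector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *	if (connector->dsc_aux &&
 *	    drm_dp_dpcd_read(connector->dsc_aux, DP_DSC_SUPPORT, &dsc, 1) == 1 &&
 *	    (dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
 *		... DSC can be enabled for this sink ...
 */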
6151