/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_fixed.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>

enum drm_dp_mst_topology_ref_type {
	DRM_DP_MST_TOPOLOGY_REF_GET,
	DRM_DP_MST_TOPOLOGY_REF_PUT,
};

struct drm_dp_mst_topology_ref_history {
	struct drm_dp_mst_topology_ref_entry {
		enum drm_dp_mst_topology_ref_type type;
		int count;
		ktime_t ts_nsec;
		depot_stack_handle_t backtrace;
	} *entries;
	int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */

enum drm_dp_mst_payload_allocation {
	DRM_DP_MST_PAYLOAD_ALLOCATION_NONE,
	DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL,
	DRM_DP_MST_PAYLOAD_ALLOCATION_DFP,
	DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE,
};

struct drm_dp_mst_branch;

/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mcs: message capability status - DP 1.2 spec. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @pdt: Peer Device Type. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ldps: Legacy Device Plug Status. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @dpcd_rev: DPCD revision of device on this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_streams: Number of simultaneous streams. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @full_pbn: Max possible bandwidth for this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to device connected to this port, protected
 * by &drm_dp_mst_topology_mgr.base.lock.
 * @passthrough_aux: parent aux to which DSC pass-through requests should be
 * sent, only set if DSC pass-through is possible.
 * @parent: branch device parent of this port
 * @connector: DRM connector this port is connected to. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t full_pbn;
	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from writing concurrently
	 * by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* i2c bus for this port? */
	struct drm_dp_aux *passthrough_aux;
	struct drm_dp_mst_branch *parent;

	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	const struct drm_edid *cached_edid;

	/**
	 * @fec_capable: bool indicating if FEC can be supported up to that
	 * point in the MST topology.
	 */
	bool fec_capable;
};

/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
	u8 lct;
	u8 lcr;
	u8 rad[8];
	bool broadcast;
	bool path_msg;
	u8 msg_len;
	bool somt;
	bool eomt;
	bool seqno;
};

struct drm_dp_sideband_msg_rx {
	u8 chunk[48];
	u8 msg[256];
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;
	bool have_eomt;
	struct drm_dp_sideband_msg_hdr initial_hdr;
};

/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: whether a link address message has been sent to this device yet.
 * @guid: GUID for DP 1.2 branch device. Ports under this branch can be
 * identified by port number.
 *
 * This structure represents an MST branch device. There is one primary
 * branch device at the root, along with any other branch devices connected
 * to the downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	/**
	 * @destroy_next: linked-list entry used by
	 * drm_dp_delayed_destroy_work()
	 */
	struct list_head destroy_next;

	u8 rad[8];
	u8 lct;
	int num_ports;

	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 * There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from updating the list
	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct list_head ports;

	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};


struct drm_dp_nak_reply {
	u8 guid[16];
	u8 reason;
	u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};

struct drm_dp_query_stream_enc_status_ack_reply {
	/* Bit[23:16]- Stream Id */
	u8 stream_id;

	/* Bit[15]- Signed */
	bool reply_signed;

	/* Bit[10:8]- Stream Output Sink Type */
	bool unauthorizable_device_present;
	bool legacy_device_present;
	bool query_capable_device_present;

	/* Bit[12:11]- Stream Output CP Type */
	bool hdcp_1x_device_present;
	bool hdcp_2x_device_present;

	/* Bit[4]- Stream Authentication */
	bool auth_completed;

	/* Bit[3]- Stream Encryption */
	bool encryption_enabled;

	/* Bit[2]- Stream Repeater Function Present */
	bool repeater_present;

	/* Bit[1:0]- Stream State */
	u8 state;
};

#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;
	u8 vcpi;
	u16 pbn;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;
	u16 allocated_pbn;
};

struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	u8 num_transactions;
	u8 port_number;
	struct drm_dp_remote_i2c_read_tx {
		u8 i2c_dev_id;
		u8 num_bytes;
		u8 *bytes;
		u8 no_stop_bit;
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	u8 num_bytes_read;
};

struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;
	u8 num_bytes;
	u8 *bytes;
};

struct drm_dp_query_stream_enc_status {
	u8 stream_id;
	u8 client_id[7];	/* 56-bit nonce */
	u8 stream_event;
	bool valid_stream_event;
	u8 stream_behavior;
	u8 valid_stream_behavior;
};

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	bool fec_capable;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};

struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;

		struct drm_dp_query_stream_enc_status enc_status;
	} u;
};

struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;

		struct drm_dp_query_stream_enc_status_ack_reply enc_status;
	} u;
};

/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

struct drm_dp_sideband_msg_tx {
	u8 msg[256];
	u8 chunk[48];
	u8 cur_offset;
	u8 cur_len;
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	int seqno;
	int state;
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;
};

/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	/*
	 * Checks for any pending MST interrupts, passing them to MST core for
	 * processing, the same way an HPD IRQ pulse handler would do this.
	 * If provided, the MST core calls this callback from a poll-waiting loop
	 * when waiting for MST down message replies. The driver is expected
	 * to guard against a race between this callback and the driver's HPD
	 * IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
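
/*
 * Illustrative sketch (not part of the original header): a driver would
 * normally provide these callbacks through a static ops table, here with
 * hypothetical driver functions my_mst_add_connector() and
 * my_mst_poll_hpd_irq():
 *
 *	static const struct drm_dp_mst_topology_cbs my_mst_cbs = {
 *		.add_connector = my_mst_add_connector,
 *		.poll_hpd_irq = my_mst_poll_hpd_irq,
 *	};
 */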

#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

/**
 * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
 *
 * The primary atomic state structure for a given MST payload. Stores information like current
 * bandwidth allocation, intended action for this payload, etc.
 */
struct drm_dp_mst_atomic_payload {
	/** @port: The MST port assigned to this payload */
	struct drm_dp_mst_port *port;

	/**
	 * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
	 * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
	 * check time. This shouldn't usually matter, as the start slot should never be relevant for
	 * atomic state computations.
	 *
	 * Since this value is determined at commit time instead of check time, this value is
	 * protected by the MST helpers ensuring that async commits operating on the given topology
	 * never run in parallel. In the event that a driver does need to read this value (e.g. to
	 * inform hardware of the starting timeslot for a payload), the driver may either:
	 *
	 * * Read this field during the atomic commit after
	 *   drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
	 *   previous MST state's payload start slots have been copied over to the new state. Note
	 *   that a new start slot won't be assigned/removed from this payload until
	 *   drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called.
	 * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
	 *   get committed to hardware by calling drm_crtc_commit_wait() on each of the
	 *   &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
	 *
	 * If neither of the two above solutions suffice (e.g. the driver needs to read the start
	 * slot in the middle of an atomic commit without waiting for some reason), then drivers
	 * should cache this value themselves after changing payloads.
	 */
	s8 vc_start_slot;

	/** @vcpi: The Virtual Channel Payload Identifier */
	u8 vcpi;
	/**
	 * @time_slots:
	 * The number of timeslots allocated to this payload from the source DP Tx to
	 * the immediate downstream DP Rx
	 */
	int time_slots;
	/** @pbn: The payload bandwidth for this payload */
	int pbn;

	/** @delete: Whether or not we intend to delete this payload during this atomic commit */
	bool delete : 1;
	/** @dsc_enabled: Whether or not this payload has DSC enabled */
	bool dsc_enabled : 1;

	/** @payload_allocation_status: The allocation status of this payload */
	enum drm_dp_mst_payload_allocation payload_allocation_status;

	/** @next: The list node for this payload */
	struct list_head next;
};
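
/*
 * Illustrative sketch of the first option described in the @vc_start_slot
 * documentation above: reading the assigned start slot during the commit
 * phase. The state/mgr/port/start_slot variables are assumed to be driver
 * locals, not part of this header:
 *
 *	drm_dp_mst_atomic_wait_for_dependencies(state);
 *	mst_state = drm_atomic_get_new_mst_topology_state(state, mgr);
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 *	start_slot = payload->vc_start_slot;
 */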

/**
 * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
 *
 * This struct represents the atomic state of the toplevel DisplayPort MST manager
 */
struct drm_dp_mst_topology_state {
	/** @base: Base private state for atomic */
	struct drm_private_state base;

	/** @mgr: The topology manager */
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may
	 * modify this to add additional dependencies if needed.
	 */
	u32 pending_crtc_mask;
	/**
	 * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't
	 * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
	 */
	struct drm_crtc_commit **commit_deps;
	/** @num_commit_deps: The number of CRTC commits in @commit_deps */
	size_t num_commit_deps;

	/** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
	u32 payload_mask;
	/** @payloads: The list of payloads being created/destroyed in this state */
	struct list_head payloads;

	/** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
	u8 total_avail_slots;
	/** @start_slot: The first usable time slot in this topology (1 or 0) */
	u8 start_slot;

	/**
	 * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
	 * out itself.
	 */
	fixed20_12 pbn_div;
};

#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the toplevel DisplayPort MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @down_rep_recv: Message receiver state for replies to down
	 * requests.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
	 * @payload_id_table_cleared.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: Whether this manager is enabled for an MST capable port.
	 * False if no MST sink/branch device is connected.
	 */
	bool mst_state : 1;

	/**
	 * @payload_id_table_cleared: Whether or not we've cleared the payload
	 * ID table for @mst_primary. Protected by @lock.
	 */
	bool payload_id_table_cleared : 1;

	/**
	 * @payload_count: The number of currently active payloads in hardware. This value is only
	 * intended to be used internally by MST helpers for payload tracking, and is only safe to
	 * read/write from the atomic commit (not check) context.
	 */
	u8 payload_count;

	/**
	 * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
	 * internally by MST helpers for payload tracking, and is only safe to read/write from the
	 * atomic commit (not check) context.
	 */
	u8 next_start_slot;

	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests
	 */
	struct list_head tx_msg_downq;

	/**
	 * @tx_waitq: Wait queue to stall on for the tx worker.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of connectors to be destroyed.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of branch devices to be
	 * destroyed.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;

	/**
	 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
	 * A dedicated WQ makes it possible to drain any requeued work items
	 * on it.
	 */
	struct workqueue_struct *delayed_destroy_wq;

	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history_lock: protects
	 * &drm_dp_mst_port.topology_ref_history and
	 * &drm_dp_mst_branch.topology_ref_history.
	 */
	struct mutex topology_ref_history_lock;
#endif
};

int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id);

void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
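
/*
 * Illustrative sketch (not from the original header): one topology manager is
 * initialized per MST capable connector, typically from the driver's connector
 * init path. The transaction size and payload count below are placeholder
 * values and dp_priv is a hypothetical driver structure:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&dp_priv->mst_mgr, dev, &dp_priv->aux,
 *					   16, 4, dp_priv->connector.base.id);
 *	if (ret)
 *		return ret;
 */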

/**
 * enum drm_dp_mst_mode - sink's MST mode capability
 */
enum drm_dp_mst_mode {
	/**
	 * @DRM_DP_SST: The sink supports neither MST nor single stream sideband
	 * messaging.
	 */
	DRM_DP_SST,
	/**
	 * @DRM_DP_MST: Sink supports MST, more than one stream and single
	 * stream sideband messaging.
	 */
	DRM_DP_MST,
	/**
	 * @DRM_DP_SST_SIDEBAND_MSG: Sink supports only one stream and single
	 * stream sideband messaging.
	 */
	DRM_DP_SST_SIDEBAND_MSG,
};

enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
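
/*
 * Illustrative sketch (not from the original header): enabling or disabling
 * MST after a hotplug, assuming @dpcd has already been populated from the
 * sink's receiver capability registers:
 *
 *	if (drm_dp_read_mst_cap(aux, dpcd) == DRM_DP_MST)
 *		ret = drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	else
 *		ret = drm_dp_mst_topology_mgr_set_mst(mgr, false);
 */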

int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
				    const u8 *esi,
				    u8 *ack,
				    bool *handled);
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);

int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port);

const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
					    struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);

fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
				    int link_rate, int link_lane_count);

int drm_dp_calc_pbn_mode(int clock, int bpp);

void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);

int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_topology_state *mst_state,
			     struct drm_dp_mst_atomic_payload *payload);
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_atomic_payload *payload);
void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_topology_state *mst_state,
				 struct drm_dp_mst_atomic_payload *payload);
void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_topology_state *mst_state,
				 const struct drm_dp_mst_atomic_payload *old_payload,
				 struct drm_dp_mst_atomic_payload *new_payload);

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
			       bool sync);

ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size);
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size);

int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port);

struct drm_dp_mst_topology_state *
drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_atomic_payload *
drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
				 struct drm_dp_mst_port *port);
bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
					  struct drm_dp_mst_port *port,
					  struct drm_dp_mst_port *parent);
int __must_check
drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn);
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);
void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
		struct drm_dp_mst_port *port,
		struct drm_dp_query_stream_enc_status_ack_reply *status);
int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
					     struct drm_dp_mst_topology_mgr *mgr,
					     struct drm_dp_mst_topology_state *mst_state,
					     struct drm_dp_mst_port **failing_port);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
						   struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);

static inline
bool drm_dp_mst_port_is_logical(struct drm_dp_mst_port *port)
{
	return port->port_num >= DP_MST_LOGICAL_PORT_0;
}

struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port);
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);

static inline struct drm_dp_mst_topology_state *
to_drm_dp_mst_topology_state(struct drm_private_state *state)
{
	return container_of(state, struct drm_dp_mst_topology_state, base);
}

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;

/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_mgr **mgr,
			    struct drm_dp_mst_topology_state **old_state,
			    struct drm_dp_mst_topology_state **new_state,
			    int i)
{
	struct __drm_private_objs_state *objs_state = &state->private_objs[i];

	if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
		return false;

	*mgr = to_dp_mst_topology_mgr(objs_state->ptr);
	if (old_state)
		*old_state = to_dp_mst_topology_state(objs_state->old_state);
	if (new_state)
		*new_state = to_dp_mst_topology_state(objs_state->new_state);

	return true;
}

/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
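
/*
 * Illustrative sketch (not from the original header): walking every MST
 * manager in an atomic update from an atomic check hook. The variables and
 * the my_check_mst_bw() helper are hypothetical driver code:
 *
 *	struct drm_dp_mst_topology_mgr *mgr;
 *	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
 *	int i, ret;
 *
 *	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
 *		ret = my_check_mst_bw(old_mst_state, new_mst_state);
 *		if (ret)
 *			return ret;
 *	}
 */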

/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))

/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))

#endif