/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_fixed.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>

enum drm_dp_mst_topology_ref_type {
	DRM_DP_MST_TOPOLOGY_REF_GET,
	DRM_DP_MST_TOPOLOGY_REF_PUT,
};

struct drm_dp_mst_topology_ref_history {
	struct drm_dp_mst_topology_ref_entry {
		enum drm_dp_mst_topology_ref_type type;
		int count;
		ktime_t ts_nsec;
		depot_stack_handle_t backtrace;
	} *entries;
	int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */

enum drm_dp_mst_payload_allocation {
	DRM_DP_MST_PAYLOAD_ALLOCATION_NONE,
	DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL,
	DRM_DP_MST_PAYLOAD_ALLOCATION_DFP,
	DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE,
};

struct drm_dp_mst_branch;

/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mcs: message capability status - DP 1.2 spec. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @pdt: Peer Device Type. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ldps: Legacy Device Plug Status. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @dpcd_rev: DPCD revision of device on this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_streams: Number of simultaneous streams. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @full_pbn: Max possible bandwidth for this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to device connected to this port, protected
 * by &drm_dp_mst_topology_mgr.base.lock.
 * @passthrough_aux: parent aux to which DSC pass-through requests should be
 * sent, only set if DSC pass-through is possible.
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t full_pbn;
	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from writing concurrently
	 * by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* i2c bus for this port? */
	struct drm_dp_aux *passthrough_aux;
	struct drm_dp_mst_branch *parent;

	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	const struct drm_edid *cached_edid;

	/**
	 * @fec_capable: bool indicating if FEC can be supported up to that
	 * point in the MST topology.
	 */
	bool fec_capable;
};

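/*
 * Usage sketch (illustrative, not part of the upstream header): a driver that
 * stores a struct drm_dp_mst_port pointer in one of its own structures must
 * hold a malloc reference for as long as that pointer is kept around. The
 * surrounding structure and function names here are hypothetical.
 *
 *	struct my_connector_state {
 *		struct drm_dp_mst_port *port;
 *	};
 *
 *	static void my_state_bind_port(struct my_connector_state *s,
 *				       struct drm_dp_mst_port *port)
 *	{
 *		s->port = port;
 *		drm_dp_mst_get_port_malloc(port);
 *	}
 *
 *	static void my_state_unbind_port(struct my_connector_state *s)
 *	{
 *		drm_dp_mst_put_port_malloc(s->port);
 *		s->port = NULL;
 *	}
 */
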
/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
	u8 lct;
	u8 lcr;
	u8 rad[8];
	bool broadcast;
	bool path_msg;
	u8 msg_len;
	bool somt;
	bool eomt;
	bool seqno;
};

struct drm_dp_sideband_msg_rx {
	u8 chunk[48];
	u8 msg[256];
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;
	bool have_eomt;
	struct drm_dp_sideband_msg_hdr initial_hdr;
};

/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: whether a link address message has been sent to this
 * device yet.
 * @guid: GUID for the DP 1.2 branch device. Ports under this branch can be
 * identified by port number.
 *
 * This structure represents an MST branch device; there is one
 * primary branch device at the root, along with any other branches connected
 * to downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	/**
	 * @destroy_next: linked-list entry used by
	 * drm_dp_delayed_destroy_work()
	 */
	struct list_head destroy_next;

	u8 rad[8];
	u8 lct;
	int num_ports;

	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 * There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from updating the list
	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct list_head ports;

	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};

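/*
 * Locking sketch (illustrative, not part of the upstream header): outside of
 * the topology manager's own probe work, readers of @ports are expected to
 * hold &drm_dp_mst_topology_mgr.lock while walking the list:
 *
 *	struct drm_dp_mst_port *port;
 *
 *	mutex_lock(&mgr->lock);
 *	list_for_each_entry(port, &mstb->ports, next) {
 *		// inspect port->port_num, port->input, ...
 *	}
 *	mutex_unlock(&mgr->lock);
 */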

struct drm_dp_nak_reply {
	u8 guid[16];
	u8 reason;
	u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};

struct drm_dp_query_stream_enc_status_ack_reply {
	/* Bit[23:16]- Stream Id */
	u8 stream_id;

	/* Bit[15]- Signed */
	bool reply_signed;

	/* Bit[10:8]- Stream Output Sink Type */
	bool unauthorizable_device_present;
	bool legacy_device_present;
	bool query_capable_device_present;

	/* Bit[12:11]- Stream Output CP Type */
	bool hdcp_1x_device_present;
	bool hdcp_2x_device_present;

	/* Bit[4]- Stream Authentication */
	bool auth_completed;

	/* Bit[3]- Stream Encryption */
	bool encryption_enabled;

	/* Bit[2]- Stream Repeater Function Present */
	bool repeater_present;

	/* Bit[1:0]- Stream State */
	u8 state;
};

#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;
	u8 vcpi;
	u16 pbn;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;
	u16 allocated_pbn;
};

struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	u8 num_transactions;
	u8 port_number;
	struct drm_dp_remote_i2c_read_tx {
		u8 i2c_dev_id;
		u8 num_bytes;
		u8 *bytes;
		u8 no_stop_bit;
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	u8 num_bytes_read;
};

struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;
	u8 num_bytes;
	u8 *bytes;
};

struct drm_dp_query_stream_enc_status {
	u8 stream_id;
	u8 client_id[7];	/* 56-bit nonce */
	u8 stream_event;
	bool valid_stream_event;
	u8 stream_behavior;
	u8 valid_stream_behavior;
};

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	bool fec_capable;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};

struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;

		struct drm_dp_query_stream_enc_status enc_status;
	} u;
};

struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;

		struct drm_dp_query_stream_enc_status_ack_reply enc_status;
	} u;
};

/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

struct drm_dp_sideband_msg_tx {
	u8 msg[256];
	u8 chunk[48];
	u8 cur_offset;
	u8 cur_len;
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	int seqno;
	int state;
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;
};

/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	/*
	 * Checks for any pending MST interrupts, passing them to MST core for
	 * processing, the same way an HPD IRQ pulse handler would.
	 * If provided, MST core calls this callback from a poll-waiting loop
	 * when waiting for MST down message replies. The driver is expected
	 * to guard against a race between this callback and the driver's HPD
	 * IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};

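/*
 * Example (illustrative sketch, not part of the upstream header): a driver
 * typically defines the callbacks once and points the topology manager at
 * them; my_dp_add_connector() and my_dp_poll_hpd_irq() are hypothetical
 * driver functions.
 *
 *	static const struct drm_dp_mst_topology_cbs my_mst_cbs = {
 *		.add_connector = my_dp_add_connector,
 *		.poll_hpd_irq = my_dp_poll_hpd_irq,
 *	};
 *
 *	my_mgr.cbs = &my_mst_cbs;
 */
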
#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

/**
 * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
 *
 * The primary atomic state structure for a given MST payload. Stores information like current
 * bandwidth allocation, intended action for this payload, etc.
 */
struct drm_dp_mst_atomic_payload {
	/** @port: The MST port assigned to this payload */
	struct drm_dp_mst_port *port;

	/**
	 * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
	 * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
	 * check time. This shouldn't usually matter, as the start slot should never be relevant for
	 * atomic state computations.
	 *
	 * Since this value is determined at commit time instead of check time, this value is
	 * protected by the MST helpers ensuring that async commits operating on the given topology
	 * never run in parallel. In the event that a driver does need to read this value (e.g. to
	 * inform hardware of the starting timeslot for a payload), the driver may either:
	 *
	 * * Read this field during the atomic commit after
	 *   drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
	 *   previous MST state's payload start slots have been copied over to the new state. Note
	 *   that a new start slot won't be assigned/removed from this payload until
	 *   drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called.
	 * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
	 *   get committed to hardware by calling drm_crtc_commit_wait() on each of the
	 *   &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
	 *
	 * If neither of the two above solutions suffice (e.g. the driver needs to read the start
	 * slot in the middle of an atomic commit without waiting for some reason), then drivers
	 * should cache this value themselves after changing payloads. A sketch of the first
	 * option follows this struct.
	 */
	s8 vc_start_slot;

	/** @vcpi: The Virtual Channel Payload Identifier */
	u8 vcpi;
	/**
	 * @time_slots:
	 * The number of timeslots allocated to this payload from the source DP Tx to
	 * the immediate downstream DP Rx
	 */
	int time_slots;
	/** @pbn: The payload bandwidth for this payload */
	int pbn;

	/** @delete: Whether or not we intend to delete this payload during this atomic commit */
	bool delete : 1;
	/** @dsc_enabled: Whether or not this payload has DSC enabled */
	bool dsc_enabled : 1;

	/** @payload_allocation_status: The allocation status of this payload */
	enum drm_dp_mst_payload_allocation payload_allocation_status;

	/** @next: The list node for this payload */
	struct list_head next;
};

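/*
 * Example (illustrative sketch, not part of the upstream header): reading
 * vc_start_slot from an atomic commit once all dependent commits are done;
 * my_hw_set_start_slot() is a hypothetical driver hook.
 *
 *	drm_dp_mst_atomic_wait_for_dependencies(state);
 *
 *	mst_state = drm_atomic_get_new_mst_topology_state(state, mgr);
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 *	my_hw_set_start_slot(payload->vc_start_slot);
 */
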
/**
 * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
 *
 * This struct represents the atomic state of the toplevel DisplayPort MST manager
 */
struct drm_dp_mst_topology_state {
	/** @base: Base private state for atomic */
	struct drm_private_state base;

	/** @mgr: The topology manager */
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may
	 * modify this to add additional dependencies if needed.
	 */
	u32 pending_crtc_mask;
	/**
	 * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't
	 * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
	 */
	struct drm_crtc_commit **commit_deps;
	/** @num_commit_deps: The number of CRTC commits in @commit_deps */
	size_t num_commit_deps;

	/** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
	u32 payload_mask;
	/** @payloads: The list of payloads being created/destroyed in this state */
	struct list_head payloads;

	/** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
	u8 total_avail_slots;
	/** @start_slot: The first usable time slot in this topology (1 or 0) */
	u8 start_slot;

	/**
	 * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
	 * out itself (see the sketch after this struct).
	 */
	fixed20_12 pbn_div;
};

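/*
 * Example (illustrative sketch, not part of the upstream header): drivers
 * usually derive pbn_div from the current link configuration during atomic
 * check; link_rate and lane_count describe the link to the first branch
 * device.
 *
 *	mst_state->pbn_div = drm_dp_get_vc_payload_bw(mgr, link_rate,
 *						      lane_count);
 */
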
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the toplevel displayport MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @down_rep_recv: Message receiver state for replies to down
	 * requests.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
	 * @payload_id_table_cleared.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: If this manager is enabled for an MST capable port. False
	 * if no MST sink/branch devices are connected.
	 */
	bool mst_state : 1;

	/**
	 * @payload_id_table_cleared: Whether or not we've cleared the payload
	 * ID table for @mst_primary. Protected by @lock.
	 */
	bool payload_id_table_cleared : 1;

	/**
	 * @payload_count: The number of currently active payloads in hardware. This value is only
	 * intended to be used internally by MST helpers for payload tracking, and is only safe to
	 * read/write from the atomic commit (not check) context.
	 */
	u8 payload_count;

	/**
	 * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
	 * internally by MST helpers for payload tracking, and is only safe to read/write from the
	 * atomic commit (not check) context.
	 */
	u8 next_start_slot;

	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests
	 */
	struct list_head tx_msg_downq;

	/**
	 * @tx_waitq: Wait queue the tx worker stalls on.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of ports to be destroyed.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of branch devices to be
	 * destroyed.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;

	/**
	 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
	 * A dedicated WQ makes it possible to drain any requeued work items
	 * on it.
	 */
	struct workqueue_struct *delayed_destroy_wq;

	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history_lock: protects
	 * &drm_dp_mst_port.topology_ref_history and
	 * &drm_dp_mst_branch.topology_ref_history.
	 */
	struct mutex topology_ref_history_lock;
#endif
};

int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id);

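/*
 * Example (illustrative sketch, not part of the upstream header): one-time
 * setup of a topology manager; the transaction size and payload count are
 * hypothetical, hardware-dependent values.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_mgr, drm_dev, &my_dp_aux,
 *					   16, 4, connector->base.id);
 */
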
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);

bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);

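/*
 * Example (illustrative sketch, not part of the upstream header): enabling
 * MST mode when the sink advertises the capability in its cached DPCD.
 *
 *	if (drm_dp_read_mst_cap(&my_dp_aux, dpcd))
 *		ret = drm_dp_mst_topology_mgr_set_mst(&my_mgr, true);
 */
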
int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
				    const u8 *esi,
				    u8 *ack,
				    bool *handled);
void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);

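/*
 * Example (illustrative sketch, not part of the upstream header): a
 * simplified HPD IRQ flow; my_read_esi() and my_ack_esi() stand in for the
 * driver-specific DPCD ESI accesses.
 *
 *	u8 esi[4], ack[4] = {};
 *	bool handled;
 *
 *	my_read_esi(esi);
 *	drm_dp_mst_hpd_irq_handle_event(&my_mgr, esi, ack, &handled);
 *	if (handled) {
 *		my_ack_esi(ack);
 *		drm_dp_mst_hpd_irq_send_new_request(&my_mgr);
 *	}
 */
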
int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port);

const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
					    struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);

fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
				    int link_rate, int link_lane_count);

int drm_dp_calc_pbn_mode(int clock, int bpp);

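/*
 * Example (illustrative sketch, not part of the upstream header): computing
 * the PBN a stream needs from its pixel clock and bits per pixel; check the
 * implementation for the exact units expected for bpp.
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 */
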
void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);

int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_topology_state *mst_state,
			     struct drm_dp_mst_atomic_payload *payload);
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_atomic_state *state,
			     struct drm_dp_mst_atomic_payload *payload);
void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_topology_state *mst_state,
				 struct drm_dp_mst_atomic_payload *payload);
void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_topology_state *mst_state,
				 const struct drm_dp_mst_atomic_payload *old_payload,
				 struct drm_dp_mst_atomic_payload *new_payload);

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);

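/*
 * Example (illustrative sketch, not part of the upstream header): the usual
 * enable-time ordering as seen in drivers; stream enablement itself is
 * hardware-specific and elided.
 *
 *	ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
 *	... enable the video stream ...
 *	drm_dp_check_act_status(mgr);
 *	ret = drm_dp_add_payload_part2(mgr, state, payload);
 */
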
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
			       bool sync);

ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size);
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size);

int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port);

struct drm_dp_mst_topology_state *
drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
				      struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_atomic_payload *
drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
				 struct drm_dp_mst_port *port);
bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
					  struct drm_dp_mst_port *port,
					  struct drm_dp_mst_port *parent);
int __must_check
drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn);
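
/*
 * Example (illustrative sketch, not part of the upstream header): typical use
 * from a connector's atomic check, converting a stream's bandwidth need into
 * time slots.
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 *	slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;
 */
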
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);
void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
		struct drm_dp_mst_port *port,
		struct drm_dp_query_stream_enc_status_ack_reply *status);
int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
					     struct drm_dp_mst_topology_mgr *mgr,
					     struct drm_dp_mst_topology_state *mst_state,
					     struct drm_dp_mst_port **failing_port);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
						   struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);

struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);

static inline struct drm_dp_mst_topology_state *
to_drm_dp_mst_topology_state(struct drm_private_state *state)
{
	return container_of(state, struct drm_dp_mst_topology_state, base);
}

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;

/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_mgr **mgr,
			    struct drm_dp_mst_topology_state **old_state,
			    struct drm_dp_mst_topology_state **new_state,
			    int i)
{
	struct __drm_private_objs_state *objs_state = &state->private_objs[i];

	if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
		return false;

	*mgr = to_dp_mst_topology_mgr(objs_state->ptr);
	if (old_state)
		*old_state = to_dp_mst_topology_state(objs_state->old_state);
	if (new_state)
		*new_state = to_dp_mst_topology_state(objs_state->new_state);

	return true;
}

/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))

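/*
 * Example (illustrative sketch, not part of the upstream header): walking
 * every MST manager touched by an atomic update.
 *
 *	struct drm_dp_mst_topology_mgr *mgr;
 *	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
 *	int i;
 *
 *	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
 *					 new_mst_state, i) {
 *		// compare old and new payload lists here
 *	}
 */
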
/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))

/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))

#endif