/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * Initializes the drm_device display related structures, based on the
 * information provided by DAL. The drm structures are: drm_crtc,
 * drm_connector, drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
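
/*
 * dm_crtc_get_scanoutpos() - Report the current scanout position of a CRTC.
 *
 * Queries DC for the scanout position of the given CRTC and packs the
 * horizontal/vertical position and the vblank start/end back into the
 * register-style format the base driver expects.
 */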
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
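
/*
 * get_crtc_by_otg_inst() - Map an OTG instance back to its amdgpu_crtc.
 *
 * Walks the mode_config CRTC list for the CRTC whose OTG instance matches;
 * falls back to CRTC 0 (with a warning) when the instance is invalid.
 */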
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

/* Returns true if FreeSync/VRR is active, in either variable or fixed mode. */
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, carrying the amdgpu device and
 *                    the IRQ source that fired
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* The IRQ can occur during the initial stage, before setup is done. */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of the
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
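
/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: interrupt parameters, carrying the amdgpu device and
 *                    the IRQ source that fired
 *
 * In VRR mode, core vblank handling is deferred to this handler, which runs
 * after the end of the front-porch, where vblank timestamping gives valid
 * results. It also performs BTR processing for pre-DCE12 ASICs.
 */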
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after the end of the
		 * front-porch in VRR mode, as vblank timestamping will only
		 * give valid results when done after the front-porch. This
		 * will also deliver page-flip completion events that were
		 * queued to us if a pageflip happened inside the front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at the start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
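
/*
 * amdgpu_dm_audio_init() - Describe the audio pins exposed by DC and register
 * the DM audio component with the HDA driver through the component framework.
 */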
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
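
/*
 * dm_dmub_hw_init() - Bring up the DMUB hardware.
 *
 * Copies the firmware instruction constants, BSS data and VBIOS image into
 * the framebuffer windows reserved for DMUB, clears the mailbox, trace
 * buffer and firmware-state windows, hands the region layout to the DMUB
 * service, and waits for the firmware auto-load to complete.
 */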
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
	 * DMUB firmware to cw0; otherwise, the back-door firmware load is
	 * done here in dm_dmub_hw_init.
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* Back-door load the firmware and trigger DMUB running. */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
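
/*
 * amdgpu_dm_init() - Create and wire up the display manager.
 *
 * Sets up the DM locks and IRQ support, creates the Display Core (DC)
 * instance from the collected ASIC parameters, initializes DMUB, the
 * freesync and color management modules, HDCP support where configured,
 * and finally the DRM-facing software state.
 */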
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter =
			!(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* Guard against fini on an error path where DC was never created. */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
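
/*
 * load_dmcu_fw() - Request the DMCU firmware image for ASICs that need one.
 *
 * Most ASICs need no externally loaded DMCU firmware and return early; for
 * the rest, the firmware is requested, validated and registered for loading
 * through PSP.
 */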
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	dm_write_reg(adev->dm.dc->ctx, address, value);
}
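
/*
 * dm_dmub_sw_init() - Software-side DMUB setup.
 *
 * Requests and validates the DMUB firmware for the ASIC, creates the DMUB
 * service, sizes its regions from the firmware header, allocates the backing
 * framebuffer memory and computes the final region layout.
 */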
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
#endif
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Record the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}
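
/*
 * detect_mst_link_for_all_connectors() - Start MST topology management on
 * every connector attached to an MST branch; a connector that fails to start
 * is downgraded to a single (SST) connection.
 */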
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret;

	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}
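
/*
 * s3_handle_mst() - Suspend or resume the MST topology managers around S3.
 *
 * On resume, a manager that fails to come back up is torn down and a hotplug
 * event is sent so that userspace re-probes the topology.
 */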
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed and
	 * should be passed to smu during boot up and resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create and dcn20_resource_construct, and then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (adev->in_gpu_reset) {
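		/*
		 * Hold dc_lock across the GPU reset; the matching unlock is
		 * done in the adev->in_gpu_reset path of dm_resume().
		 */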
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
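
/*
 * emulated_link_detect() - Emulate sink detection on a link: the link type is
 * forced to none, a sink matching the connector's signal type is created, and
 * the local EDID is read into it.
 */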
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
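
/*
 * dm_gpureset_commit_state() - After a GPU reset, re-commit every surface of
 * the cached DC state with force_full_update set so that the hardware is
 * fully reprogrammed.
 */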
1781 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1782 				     struct amdgpu_display_manager *dm)
1783 {
1784 	struct {
1785 		struct dc_surface_update surface_updates[MAX_SURFACES];
1786 		struct dc_plane_info plane_infos[MAX_SURFACES];
1787 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1788 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1789 		struct dc_stream_update stream_update;
1790 	} *bundle;
1791 	int k, m;
1792 
1793 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1794 
1795 	if (!bundle) {
1796 		dm_error("Failed to allocate update bundle\n");
1797 		goto cleanup;
1798 	}
1799 
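	/*
	 * Resubmit every cached stream with a forced full update so DC
	 * reprograms the hardware state that was lost across the reset.
	 */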
1800 	for (k = 0; k < dc_state->stream_count; k++) {
1801 		bundle->stream_update.stream = dc_state->streams[k];
1802 
1803 		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1804 			bundle->surface_updates[m].surface =
1805 				dc_state->stream_status[k].plane_states[m];
1806 			bundle->surface_updates[m].surface->force_full_update =
1807 				true;
1808 		}
1809 		dc_commit_updates_for_stream(
1810 			dm->dc, bundle->surface_updates,
1811 			dc_state->stream_status[k].plane_count,
1812 			dc_state->streams[k], &bundle->stream_update, dc_state);
1813 	}
1814 
1815 cleanup:
1816 	kfree(bundle);
1817 
1818 	return;
1819 }
1820 
1821 static int dm_resume(void *handle)
1822 {
1823 	struct amdgpu_device *adev = handle;
1824 	struct drm_device *ddev = adev->ddev;
1825 	struct amdgpu_display_manager *dm = &adev->dm;
1826 	struct amdgpu_dm_connector *aconnector;
1827 	struct drm_connector *connector;
1828 	struct drm_connector_list_iter iter;
1829 	struct drm_crtc *crtc;
1830 	struct drm_crtc_state *new_crtc_state;
1831 	struct dm_crtc_state *dm_new_crtc_state;
1832 	struct drm_plane *plane;
1833 	struct drm_plane_state *new_plane_state;
1834 	struct dm_plane_state *dm_new_plane_state;
1835 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1836 	enum dc_connection_type new_connection_type = dc_connection_none;
1837 	struct dc_state *dc_state;
1838 	int i, r, j;
1839 
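	/* GPU-reset path: replay the dc_state that dm_suspend() cached. */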
1840 	if (adev->in_gpu_reset) {
1841 		dc_state = dm->cached_dc_state;
1842 
1843 		r = dm_dmub_hw_init(adev);
1844 		if (r)
1845 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1846 
1847 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1848 		dc_resume(dm->dc);
1849 
1850 		amdgpu_dm_irq_resume_early(adev);
1851 
1852 		for (i = 0; i < dc_state->stream_count; i++) {
1853 			dc_state->streams[i]->mode_changed = true;
1854 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1855 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
1856 					= 0xffffffff;
1857 			}
1858 		}
1859 
1860 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1861 
1862 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1863 
1864 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1865 
1866 		dc_release_state(dm->cached_dc_state);
1867 		dm->cached_dc_state = NULL;
1868 
1869 		amdgpu_dm_irq_resume_late(adev);
1870 
1871 		mutex_unlock(&dm->dc_lock);
1872 
1873 		return 0;
1874 	}
1875 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1876 	dc_release_state(dm_state->context);
1877 	dm_state->context = dc_create_state(dm->dc);
1878 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1879 	dc_resource_state_construct(dm->dc, dm_state->context);
1880 
1881 	/* Before powering on DC we need to re-initialize DMUB. */
1882 	r = dm_dmub_hw_init(adev);
1883 	if (r)
1884 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1885 
1886 	/* power on hardware */
1887 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1888 
1889 	/* program HPD filter */
1890 	dc_resume(dm->dc);
1891 
1892 	/*
1893 	 * Enable the HPD Rx IRQ early; this must happen before the mode is
1894 	 * set, as short-pulse interrupts are used for MST.
1895 	 */
1896 	amdgpu_dm_irq_resume_early(adev);
1897 
1898 	/* On resume we need to rewrite the MSTM control bits to enable MST */
1899 	s3_handle_mst(ddev, false);
1900 
1901 	/* Do detection */
1902 	drm_connector_list_iter_begin(ddev, &iter);
1903 	drm_for_each_connector_iter(connector, &iter) {
1904 		aconnector = to_amdgpu_dm_connector(connector);
1905 
1906 		/*
1906 		 * Skip connectors that were already created as part of MST
1907 		 * enumeration; the MST framework handles them.
1909 		 */
1910 		if (aconnector->mst_port)
1911 			continue;
1912 
1913 		mutex_lock(&aconnector->hpd_lock);
1914 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1915 			DRM_ERROR("KMS: Failed to detect connector\n");
1916 
1917 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1918 			emulated_link_detect(aconnector->dc_link);
1919 		else
1920 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1921 
1922 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1923 			aconnector->fake_enable = false;
1924 
1925 		if (aconnector->dc_sink)
1926 			dc_sink_release(aconnector->dc_sink);
1927 		aconnector->dc_sink = NULL;
1928 		amdgpu_dm_update_connector_after_detect(aconnector);
1929 		mutex_unlock(&aconnector->hpd_lock);
1930 	}
1931 	drm_connector_list_iter_end(&iter);
1932 
1933 	/* Force mode set in atomic commit */
1934 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1935 		new_crtc_state->active_changed = true;
1936 
1937 	/*
1938 	 * atomic_check is expected to create the dc states. We need to release
1939 	 * them here, since they were duplicated as part of the suspend
1940 	 * procedure.
1941 	 */
1942 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1943 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1944 		if (dm_new_crtc_state->stream) {
1945 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1946 			dc_stream_release(dm_new_crtc_state->stream);
1947 			dm_new_crtc_state->stream = NULL;
1948 		}
1949 	}
1950 
1951 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1952 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1953 		if (dm_new_plane_state->dc_state) {
1954 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1955 			dc_plane_state_release(dm_new_plane_state->dc_state);
1956 			dm_new_plane_state->dc_state = NULL;
1957 		}
1958 	}
1959 
1960 	drm_atomic_helper_resume(ddev, dm->cached_state);
1961 
1962 	dm->cached_state = NULL;
1963 
1964 	amdgpu_dm_irq_resume_late(adev);
1965 
1966 	amdgpu_dm_smu_write_watermarks_table(adev);
1967 
1968 	return 0;
1969 }
1970 
1971 /**
1972  * DOC: DM Lifecycle
1973  *
1974  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1975  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1976  * the base driver's device list to be initialized and torn down accordingly.
1977  *
1978  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1979  */
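/*
 * A minimal sketch of how the base driver consumes these hooks (the real
 * call site lives elsewhere in amdgpu; shown here for illustration only):
 *
 *	amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * The base driver then invokes the amdgpu_dm_funcs callbacks below, such
 * as .hw_init and .suspend, at the matching points of the device lifecycle.
 */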
1980 
1981 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1982 	.name = "dm",
1983 	.early_init = dm_early_init,
1984 	.late_init = dm_late_init,
1985 	.sw_init = dm_sw_init,
1986 	.sw_fini = dm_sw_fini,
1987 	.hw_init = dm_hw_init,
1988 	.hw_fini = dm_hw_fini,
1989 	.suspend = dm_suspend,
1990 	.resume = dm_resume,
1991 	.is_idle = dm_is_idle,
1992 	.wait_for_idle = dm_wait_for_idle,
1993 	.check_soft_reset = dm_check_soft_reset,
1994 	.soft_reset = dm_soft_reset,
1995 	.set_clockgating_state = dm_set_clockgating_state,
1996 	.set_powergating_state = dm_set_powergating_state,
1997 };
1998 
1999 const struct amdgpu_ip_block_version dm_ip_block =
2000 {
2001 	.type = AMD_IP_BLOCK_TYPE_DCE,
2002 	.major = 1,
2003 	.minor = 0,
2004 	.rev = 0,
2005 	.funcs = &amdgpu_dm_funcs,
2006 };
2007 
2008 
2009 /**
2010  * DOC: atomic
2011  *
2012  * *WIP*
2013  */
2014 
2015 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2016 	.fb_create = amdgpu_display_user_framebuffer_create,
2017 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2018 	.atomic_check = amdgpu_dm_atomic_check,
2019 	.atomic_commit = amdgpu_dm_atomic_commit,
2020 };
2021 
2022 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2023 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2024 };
2025 
2026 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2027 {
2028 	u32 max_cll, min_cll, max, min, q, r;
2029 	struct amdgpu_dm_backlight_caps *caps;
2030 	struct amdgpu_display_manager *dm;
2031 	struct drm_connector *conn_base;
2032 	struct amdgpu_device *adev;
2033 	struct dc_link *link = NULL;
2034 	static const u8 pre_computed_values[] = {
2035 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2036 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2037 
2038 	if (!aconnector || !aconnector->dc_link)
2039 		return;
2040 
2041 	link = aconnector->dc_link;
2042 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2043 		return;
2044 
2045 	conn_base = &aconnector->base;
2046 	adev = conn_base->dev->dev_private;
2047 	dm = &adev->dm;
2048 	caps = &dm->backlight_caps;
2049 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2050 	caps->aux_support = false;
2051 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2052 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2053 
2054 	if (caps->ext_caps->bits.oled == 1 ||
2055 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2056 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2057 		caps->aux_support = true;
2058 
2059 	/* From the specification (CTA-861-G), for calculating the maximum
2060 	 * luminance we need to use:
2061 	 *	Luminance = 50*2**(CV/32)
2062 	 * where CV is a one-byte value.
2063 	 * Calculating this expression directly would need floating-point
2064 	 * precision; to avoid that complexity, we take advantage of the fact
2065 	 * that CV is divided by a constant. From Euclid's division algorithm,
2066 	 * we know that CV can be written as: CV = 32*q + r. Next, we replace
2067 	 * CV in the Luminance expression and get 50*(2**q)*(2**(r/32)), so we
2068 	 * just need to pre-compute 50*2**(r/32) for each r. For pre-computing
2069 	 * the values we used the following Ruby line:
2070 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2071 	 * The results of the above expression can be verified against
2072 	 * pre_computed_values.
2073 	 */
2074 	q = max_cll >> 5;
2075 	r = max_cll % 32;
2076 	max = (1 << q) * pre_computed_values[r];
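	/*
	 * Worked example (illustrative values only): max_cll = 100 gives
	 * q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440, close to
	 * the exact 50 * 2**(100/32.0) ~= 436; the small difference comes
	 * from rounding when the table was generated.
	 */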
2077 
2078 	/* min luminance: maxLum * (CV/255)^2 / 100, computed with a single
2079 	 * rounded division so the integer math does not truncate to zero. */
2080 	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2081 
2082 	caps->aux_max_input_signal = max;
2083 	caps->aux_min_input_signal = min;
2084 }
2085 
2086 void amdgpu_dm_update_connector_after_detect(
2087 		struct amdgpu_dm_connector *aconnector)
2088 {
2089 	struct drm_connector *connector = &aconnector->base;
2090 	struct drm_device *dev = connector->dev;
2091 	struct dc_sink *sink;
2092 
2093 	/* MST handled by drm_mst framework */
2094 	if (aconnector->mst_mgr.mst_state)
2095 		return;
2096 
2097 
2098 	sink = aconnector->dc_link->local_sink;
2099 	if (sink)
2100 		dc_sink_retain(sink);
2101 
2102 	/*
2103 	 * An EDID-managed connector gets its first update only in the mode_valid
2104 	 * hook; the sink is then set to fake or physical depending on link status.
2105 	 * Skip if already done during boot.
2106 	 */
2107 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2108 			&& aconnector->dc_em_sink) {
2109 
2110 		/*
2111 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
2112 		 * fake a stream, because connector->sink is set to NULL on resume.
2113 		 */
2114 		mutex_lock(&dev->mode_config.mutex);
2115 
2116 		if (sink) {
2117 			if (aconnector->dc_sink) {
2118 				amdgpu_dm_update_freesync_caps(connector, NULL);
2119 				/*
2120 				 * The retain and release below bump up the sink's
2121 				 * refcount because the link no longer points to it
2122 				 * after disconnect; otherwise the next crtc-to-connector
2123 				 * reshuffle by the UMD would release the dc_sink unexpectedly.
2124 				 */
2125 				dc_sink_release(aconnector->dc_sink);
2126 			}
2127 			aconnector->dc_sink = sink;
2128 			dc_sink_retain(aconnector->dc_sink);
2129 			amdgpu_dm_update_freesync_caps(connector,
2130 					aconnector->edid);
2131 		} else {
2132 			amdgpu_dm_update_freesync_caps(connector, NULL);
2133 			if (!aconnector->dc_sink) {
2134 				aconnector->dc_sink = aconnector->dc_em_sink;
2135 				dc_sink_retain(aconnector->dc_sink);
2136 			}
2137 		}
2138 
2139 		mutex_unlock(&dev->mode_config.mutex);
2140 
2141 		if (sink)
2142 			dc_sink_release(sink);
2143 		return;
2144 	}
2145 
2146 	/*
2147 	 * TODO: temporary guard until a proper fix is found;
2148 	 * if this sink is an MST sink, we should not do anything.
2149 	 */
2150 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2151 		dc_sink_release(sink);
2152 		return;
2153 	}
2154 
2155 	if (aconnector->dc_sink == sink) {
2156 		/*
2157 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2158 		 * Do nothing!!
2159 		 */
2160 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2161 				aconnector->connector_id);
2162 		if (sink)
2163 			dc_sink_release(sink);
2164 		return;
2165 	}
2166 
2167 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2168 		aconnector->connector_id, aconnector->dc_sink, sink);
2169 
2170 	mutex_lock(&dev->mode_config.mutex);
2171 
2172 	/*
2173 	 * 1. Update status of the drm connector
2174 	 * 2. Send an event and let userspace tell us what to do
2175 	 */
2176 	if (sink) {
2177 		/*
2178 		 * TODO: check if we still need the S3 mode update workaround.
2179 		 * If yes, put it here.
2180 		 */
2181 		if (aconnector->dc_sink)
2182 			amdgpu_dm_update_freesync_caps(connector, NULL);
2183 
2184 		aconnector->dc_sink = sink;
2185 		dc_sink_retain(aconnector->dc_sink);
2186 		if (sink->dc_edid.length == 0) {
2187 			aconnector->edid = NULL;
2188 			if (aconnector->dc_link->aux_mode) {
2189 				drm_dp_cec_unset_edid(
2190 					&aconnector->dm_dp_aux.aux);
2191 			}
2192 		} else {
2193 			aconnector->edid =
2194 				(struct edid *)sink->dc_edid.raw_edid;
2195 
2196 			drm_connector_update_edid_property(connector,
2197 							   aconnector->edid);
2198 
2199 			if (aconnector->dc_link->aux_mode)
2200 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2201 						    aconnector->edid);
2202 		}
2203 
2204 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2205 		update_connector_ext_caps(aconnector);
2206 	} else {
2207 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2208 		amdgpu_dm_update_freesync_caps(connector, NULL);
2209 		drm_connector_update_edid_property(connector, NULL);
2210 		aconnector->num_modes = 0;
2211 		dc_sink_release(aconnector->dc_sink);
2212 		aconnector->dc_sink = NULL;
2213 		aconnector->edid = NULL;
2214 #ifdef CONFIG_DRM_AMD_DC_HDCP
2215 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2216 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2217 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2218 #endif
2219 	}
2220 
2221 	mutex_unlock(&dev->mode_config.mutex);
2222 
2223 	if (sink)
2224 		dc_sink_release(sink);
2225 }
2226 
2227 static void handle_hpd_irq(void *param)
2228 {
2229 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2230 	struct drm_connector *connector = &aconnector->base;
2231 	struct drm_device *dev = connector->dev;
2232 	enum dc_connection_type new_connection_type = dc_connection_none;
2233 #ifdef CONFIG_DRM_AMD_DC_HDCP
2234 	struct amdgpu_device *adev = dev->dev_private;
2235 #endif
2236 
2237 	/*
2238 	 * In case of failure, or for MST, there is no need to update the connector status
2239 	 * or notify the OS, since (in the MST case) MST does this in its own context.
2240 	 */
2241 	mutex_lock(&aconnector->hpd_lock);
2242 
2243 #ifdef CONFIG_DRM_AMD_DC_HDCP
2244 	if (adev->dm.hdcp_workqueue)
2245 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2246 #endif
2247 	if (aconnector->fake_enable)
2248 		aconnector->fake_enable = false;
2249 
2250 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2251 		DRM_ERROR("KMS: Failed to detect connector\n");
2252 
2253 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2254 		emulated_link_detect(aconnector->dc_link);
2255 
2256 
2257 		drm_modeset_lock_all(dev);
2258 		dm_restore_drm_connector_state(dev, connector);
2259 		drm_modeset_unlock_all(dev);
2260 
2261 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2262 			drm_kms_helper_hotplug_event(dev);
2263 
2264 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2265 		amdgpu_dm_update_connector_after_detect(aconnector);
2266 
2267 
2268 		drm_modeset_lock_all(dev);
2269 		dm_restore_drm_connector_state(dev, connector);
2270 		drm_modeset_unlock_all(dev);
2271 
2272 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2273 			drm_kms_helper_hotplug_event(dev);
2274 	}
2275 	mutex_unlock(&aconnector->hpd_lock);
2276 
2277 }
2278 
2279 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2280 {
2281 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2282 	uint8_t dret;
2283 	bool new_irq_handled = false;
2284 	int dpcd_addr;
2285 	int dpcd_bytes_to_read;
2286 
2287 	const int max_process_count = 30;
2288 	int process_count = 0;
2289 
2290 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2291 
2292 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2293 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2294 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2295 		dpcd_addr = DP_SINK_COUNT;
2296 	} else {
2297 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2298 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2299 		dpcd_addr = DP_SINK_COUNT_ESI;
2300 	}
2301 
2302 	dret = drm_dp_dpcd_read(
2303 		&aconnector->dm_dp_aux.aux,
2304 		dpcd_addr,
2305 		esi,
2306 		dpcd_bytes_to_read);
2307 
2308 	while (dret == dpcd_bytes_to_read &&
2309 		process_count < max_process_count) {
2310 		uint8_t retry;
2311 		dret = 0;
2312 
2313 		process_count++;
2314 
2315 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2316 		/* handle HPD short pulse irq */
2317 		if (aconnector->mst_mgr.mst_state)
2318 			drm_dp_mst_hpd_irq(
2319 				&aconnector->mst_mgr,
2320 				esi,
2321 				&new_irq_handled);
2322 
2323 		if (new_irq_handled) {
2324 			/* ACK at DPCD to notify downstream */
2325 			const int ack_dpcd_bytes_to_write =
2326 				dpcd_bytes_to_read - 1;
2327 
2328 			for (retry = 0; retry < 3; retry++) {
2329 				uint8_t wret;
2330 
2331 				wret = drm_dp_dpcd_write(
2332 					&aconnector->dm_dp_aux.aux,
2333 					dpcd_addr + 1,
2334 					&esi[1],
2335 					ack_dpcd_bytes_to_write);
2336 				if (wret == ack_dpcd_bytes_to_write)
2337 					break;
2338 			}
2339 
2340 			/* check if there is new irq to be handled */
2341 			dret = drm_dp_dpcd_read(
2342 				&aconnector->dm_dp_aux.aux,
2343 				dpcd_addr,
2344 				esi,
2345 				dpcd_bytes_to_read);
2346 
2347 			new_irq_handled = false;
2348 		} else {
2349 			break;
2350 		}
2351 	}
2352 
2353 	if (process_count == max_process_count)
2354 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2355 }
2356 
2357 static void handle_hpd_rx_irq(void *param)
2358 {
2359 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2360 	struct drm_connector *connector = &aconnector->base;
2361 	struct drm_device *dev = connector->dev;
2362 	struct dc_link *dc_link = aconnector->dc_link;
2363 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2364 	enum dc_connection_type new_connection_type = dc_connection_none;
2365 #ifdef CONFIG_DRM_AMD_DC_HDCP
2366 	union hpd_irq_data hpd_irq_data;
2367 	struct amdgpu_device *adev = dev->dev_private;
2368 
2369 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2370 #endif
2371 
2372 	/*
2373 	 * TODO: Temporarily take a mutex so the HPD interrupt cannot race on
2374 	 * the GPIO; once the i2c helper is implemented, this mutex should be
2375 	 * retired.
2376 	 */
2377 	if (dc_link->type != dc_connection_mst_branch)
2378 		mutex_lock(&aconnector->hpd_lock);
2379 
2380 
2381 #ifdef CONFIG_DRM_AMD_DC_HDCP
2382 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2383 #else
2384 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2385 #endif
2386 			!is_mst_root_connector) {
2387 		/* Downstream Port status changed. */
2388 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2389 			DRM_ERROR("KMS: Failed to detect connector\n");
2390 
2391 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2392 			emulated_link_detect(dc_link);
2393 
2394 			if (aconnector->fake_enable)
2395 				aconnector->fake_enable = false;
2396 
2397 			amdgpu_dm_update_connector_after_detect(aconnector);
2398 
2399 
2400 			drm_modeset_lock_all(dev);
2401 			dm_restore_drm_connector_state(dev, connector);
2402 			drm_modeset_unlock_all(dev);
2403 
2404 			drm_kms_helper_hotplug_event(dev);
2405 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2406 
2407 			if (aconnector->fake_enable)
2408 				aconnector->fake_enable = false;
2409 
2410 			amdgpu_dm_update_connector_after_detect(aconnector);
2411 
2412 
2413 			drm_modeset_lock_all(dev);
2414 			dm_restore_drm_connector_state(dev, connector);
2415 			drm_modeset_unlock_all(dev);
2416 
2417 			drm_kms_helper_hotplug_event(dev);
2418 		}
2419 	}
2420 #ifdef CONFIG_DRM_AMD_DC_HDCP
2421 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2422 		if (adev->dm.hdcp_workqueue)
2423 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2424 	}
2425 #endif
2426 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2427 	    (dc_link->type == dc_connection_mst_branch))
2428 		dm_handle_hpd_rx_irq(aconnector);
2429 
2430 	if (dc_link->type != dc_connection_mst_branch) {
2431 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2432 		mutex_unlock(&aconnector->hpd_lock);
2433 	}
2434 }
2435 
2436 static void register_hpd_handlers(struct amdgpu_device *adev)
2437 {
2438 	struct drm_device *dev = adev->ddev;
2439 	struct drm_connector *connector;
2440 	struct amdgpu_dm_connector *aconnector;
2441 	const struct dc_link *dc_link;
2442 	struct dc_interrupt_params int_params = {0};
2443 
2444 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2445 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2446 
2447 	list_for_each_entry(connector,
2448 			&dev->mode_config.connector_list, head) {
2449 
2450 		aconnector = to_amdgpu_dm_connector(connector);
2451 		dc_link = aconnector->dc_link;
2452 
2453 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2454 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2455 			int_params.irq_source = dc_link->irq_source_hpd;
2456 
2457 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2458 					handle_hpd_irq,
2459 					(void *) aconnector);
2460 		}
2461 
2462 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2463 
2464 			/* Also register for DP short pulse (hpd_rx). */
2465 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2466 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2467 
2468 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2469 					handle_hpd_rx_irq,
2470 					(void *) aconnector);
2471 		}
2472 	}
2473 }
2474 
2475 /* Register IRQ sources and initialize IRQ callbacks */
2476 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2477 {
2478 	struct dc *dc = adev->dm.dc;
2479 	struct common_irq_params *c_irq_params;
2480 	struct dc_interrupt_params int_params = {0};
2481 	int r;
2482 	int i;
2483 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2484 
2485 	if (adev->asic_type >= CHIP_VEGA10)
2486 		client_id = SOC15_IH_CLIENTID_DCE;
2487 
2488 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2489 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2490 
2491 	/*
2492 	 * Actions of amdgpu_irq_add_id():
2493 	 * 1. Register a set() function with base driver.
2494 	 *    Base driver will call set() function to enable/disable an
2495 	 *    interrupt in DC hardware.
2496 	 * 2. Register amdgpu_dm_irq_handler().
2497 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2498 	 *    coming from DC hardware.
2499 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2500 	 *    for acknowledging and handling.
	 */
2501 
2502 	/* Use VBLANK interrupt */
2503 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2504 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2505 		if (r) {
2506 			DRM_ERROR("Failed to add crtc irq id!\n");
2507 			return r;
2508 		}
2509 
2510 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2511 		int_params.irq_source =
2512 			dc_interrupt_to_irq_source(dc, i, 0);
2513 
2514 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2515 
2516 		c_irq_params->adev = adev;
2517 		c_irq_params->irq_src = int_params.irq_source;
2518 
2519 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2520 				dm_crtc_high_irq, c_irq_params);
2521 	}
2522 
2523 	/* Use VUPDATE interrupt */
2524 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2525 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2526 		if (r) {
2527 			DRM_ERROR("Failed to add vupdate irq id!\n");
2528 			return r;
2529 		}
2530 
2531 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2532 		int_params.irq_source =
2533 			dc_interrupt_to_irq_source(dc, i, 0);
2534 
2535 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2536 
2537 		c_irq_params->adev = adev;
2538 		c_irq_params->irq_src = int_params.irq_source;
2539 
2540 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2541 				dm_vupdate_high_irq, c_irq_params);
2542 	}
2543 
2544 	/* Use GRPH_PFLIP interrupt */
2545 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2546 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2547 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2548 		if (r) {
2549 			DRM_ERROR("Failed to add page flip irq id!\n");
2550 			return r;
2551 		}
2552 
2553 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2554 		int_params.irq_source =
2555 			dc_interrupt_to_irq_source(dc, i, 0);
2556 
2557 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2558 
2559 		c_irq_params->adev = adev;
2560 		c_irq_params->irq_src = int_params.irq_source;
2561 
2562 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2563 				dm_pflip_high_irq, c_irq_params);
2564 
2565 	}
2566 
2567 	/* HPD */
2568 	r = amdgpu_irq_add_id(adev, client_id,
2569 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2570 	if (r) {
2571 		DRM_ERROR("Failed to add hpd irq id!\n");
2572 		return r;
2573 	}
2574 
2575 	register_hpd_handlers(adev);
2576 
2577 	return 0;
2578 }
2579 
2580 #if defined(CONFIG_DRM_AMD_DC_DCN)
2581 /* Register IRQ sources and initialize IRQ callbacks */
2582 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2583 {
2584 	struct dc *dc = adev->dm.dc;
2585 	struct common_irq_params *c_irq_params;
2586 	struct dc_interrupt_params int_params = {0};
2587 	int r;
2588 	int i;
2589 
2590 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2591 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2592 
2593 	/*
2594 	 * Actions of amdgpu_irq_add_id():
2595 	 * 1. Register a set() function with base driver.
2596 	 *    Base driver will call set() function to enable/disable an
2597 	 *    interrupt in DC hardware.
2598 	 * 2. Register amdgpu_dm_irq_handler().
2599 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2600 	 *    coming from DC hardware.
2601 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2602 	 *    for acknowledging and handling.
2603 	 */
2604 
2605 	/* Use VSTARTUP interrupt */
2606 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2607 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2608 			i++) {
2609 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2610 
2611 		if (r) {
2612 			DRM_ERROR("Failed to add crtc irq id!\n");
2613 			return r;
2614 		}
2615 
2616 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2617 		int_params.irq_source =
2618 			dc_interrupt_to_irq_source(dc, i, 0);
2619 
2620 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2621 
2622 		c_irq_params->adev = adev;
2623 		c_irq_params->irq_src = int_params.irq_source;
2624 
2625 		amdgpu_dm_irq_register_interrupt(
2626 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2627 	}
2628 
2629 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2630 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2631 	 * to trigger at end of each vblank, regardless of state of the lock,
2632 	 * matching DCE behaviour.
2633 	 */
2634 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2635 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2636 	     i++) {
2637 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2638 
2639 		if (r) {
2640 			DRM_ERROR("Failed to add vupdate irq id!\n");
2641 			return r;
2642 		}
2643 
2644 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2645 		int_params.irq_source =
2646 			dc_interrupt_to_irq_source(dc, i, 0);
2647 
2648 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2649 
2650 		c_irq_params->adev = adev;
2651 		c_irq_params->irq_src = int_params.irq_source;
2652 
2653 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2654 				dm_vupdate_high_irq, c_irq_params);
2655 	}
2656 
2657 	/* Use GRPH_PFLIP interrupt */
2658 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2659 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2660 			i++) {
2661 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2662 		if (r) {
2663 			DRM_ERROR("Failed to add page flip irq id!\n");
2664 			return r;
2665 		}
2666 
2667 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2668 		int_params.irq_source =
2669 			dc_interrupt_to_irq_source(dc, i, 0);
2670 
2671 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2672 
2673 		c_irq_params->adev = adev;
2674 		c_irq_params->irq_src = int_params.irq_source;
2675 
2676 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2677 				dm_pflip_high_irq, c_irq_params);
2678 
2679 	}
2680 
2681 	/* HPD */
2682 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2683 			&adev->hpd_irq);
2684 	if (r) {
2685 		DRM_ERROR("Failed to add hpd irq id!\n");
2686 		return r;
2687 	}
2688 
2689 	register_hpd_handlers(adev);
2690 
2691 	return 0;
2692 }
2693 #endif
2694 
2695 /*
2696  * Acquires the lock for the atomic state object and returns
2697  * the new atomic state.
2698  *
2699  * This should only be called during atomic check.
2700  */
2701 static int dm_atomic_get_state(struct drm_atomic_state *state,
2702 			       struct dm_atomic_state **dm_state)
2703 {
2704 	struct drm_device *dev = state->dev;
2705 	struct amdgpu_device *adev = dev->dev_private;
2706 	struct amdgpu_display_manager *dm = &adev->dm;
2707 	struct drm_private_state *priv_state;
2708 
2709 	if (*dm_state)
2710 		return 0;
2711 
2712 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2713 	if (IS_ERR(priv_state))
2714 		return PTR_ERR(priv_state);
2715 
2716 	*dm_state = to_dm_atomic_state(priv_state);
2717 
2718 	return 0;
2719 }
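/*
 * Minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * On success, dm_state->context may be inspected or modified under the
 * private-object lock taken by drm_atomic_get_private_obj_state().
 */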
2720 
2721 static struct dm_atomic_state *
2722 dm_atomic_get_new_state(struct drm_atomic_state *state)
2723 {
2724 	struct drm_device *dev = state->dev;
2725 	struct amdgpu_device *adev = dev->dev_private;
2726 	struct amdgpu_display_manager *dm = &adev->dm;
2727 	struct drm_private_obj *obj;
2728 	struct drm_private_state *new_obj_state;
2729 	int i;
2730 
2731 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2732 		if (obj->funcs == dm->atomic_obj.funcs)
2733 			return to_dm_atomic_state(new_obj_state);
2734 	}
2735 
2736 	return NULL;
2737 }
2738 
2739 static struct dm_atomic_state *
2740 dm_atomic_get_old_state(struct drm_atomic_state *state)
2741 {
2742 	struct drm_device *dev = state->dev;
2743 	struct amdgpu_device *adev = dev->dev_private;
2744 	struct amdgpu_display_manager *dm = &adev->dm;
2745 	struct drm_private_obj *obj;
2746 	struct drm_private_state *old_obj_state;
2747 	int i;
2748 
2749 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2750 		if (obj->funcs == dm->atomic_obj.funcs)
2751 			return to_dm_atomic_state(old_obj_state);
2752 	}
2753 
2754 	return NULL;
2755 }
2756 
2757 static struct drm_private_state *
2758 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2759 {
2760 	struct dm_atomic_state *old_state, *new_state;
2761 
2762 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2763 	if (!new_state)
2764 		return NULL;
2765 
2766 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2767 
2768 	old_state = to_dm_atomic_state(obj->state);
2769 
2770 	if (old_state && old_state->context)
2771 		new_state->context = dc_copy_state(old_state->context);
2772 
2773 	if (!new_state->context) {
2774 		kfree(new_state);
2775 		return NULL;
2776 	}
2777 
2778 	return &new_state->base;
2779 }
2780 
2781 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2782 				    struct drm_private_state *state)
2783 {
2784 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2785 
2786 	if (dm_state && dm_state->context)
2787 		dc_release_state(dm_state->context);
2788 
2789 	kfree(dm_state);
2790 }
2791 
2792 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2793 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2794 	.atomic_destroy_state = dm_atomic_destroy_state,
2795 };
2796 
2797 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2798 {
2799 	struct dm_atomic_state *state;
2800 	int r;
2801 
2802 	adev->mode_info.mode_config_initialized = true;
2803 
2804 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2805 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2806 
2807 	adev->ddev->mode_config.max_width = 16384;
2808 	adev->ddev->mode_config.max_height = 16384;
2809 
2810 	adev->ddev->mode_config.preferred_depth = 24;
2811 	adev->ddev->mode_config.prefer_shadow = 1;
2812 	/* indicates support for immediate flip */
2813 	adev->ddev->mode_config.async_page_flip = true;
2814 
2815 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2816 
2817 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2818 	if (!state)
2819 		return -ENOMEM;
2820 
2821 	state->context = dc_create_state(adev->dm.dc);
2822 	if (!state->context) {
2823 		kfree(state);
2824 		return -ENOMEM;
2825 	}
2826 
2827 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2828 
2829 	drm_atomic_private_obj_init(adev->ddev,
2830 				    &adev->dm.atomic_obj,
2831 				    &state->base,
2832 				    &dm_atomic_state_funcs);
2833 
2834 	r = amdgpu_display_modeset_create_props(adev);
2835 	if (r)
2836 		return r;
2837 
2838 	r = amdgpu_dm_audio_init(adev);
2839 	if (r)
2840 		return r;
2841 
2842 	return 0;
2843 }
2844 
2845 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2846 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2847 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2848 
2849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2850 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2851 
2852 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2853 {
2854 #if defined(CONFIG_ACPI)
2855 	struct amdgpu_dm_backlight_caps caps;
2856 
2857 	if (dm->backlight_caps.caps_valid)
2858 		return;
2859 
2860 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2861 	if (caps.caps_valid) {
2862 		dm->backlight_caps.caps_valid = true;
2863 		if (caps.aux_support)
2864 			return;
2865 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2866 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2867 	} else {
2868 		dm->backlight_caps.min_input_signal =
2869 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2870 		dm->backlight_caps.max_input_signal =
2871 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2872 	}
2873 #else
2874 	if (dm->backlight_caps.aux_support)
2875 		return;
2876 
2877 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2878 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2879 #endif
2880 }
2881 
2882 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2883 {
2884 	bool rc;
2885 
2886 	if (!link)
2887 		return 1;
2888 
2889 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2890 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2891 
2892 	return rc ? 0 : 1;
2893 }
2894 
2895 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2896 			      const uint32_t user_brightness)
2897 {
2898 	u32 min, max, conversion_pace;
2899 	u32 brightness = user_brightness;
2900 
2901 	if (!caps)
2902 		goto out;
2903 
2904 	if (!caps->aux_support) {
2905 		max = caps->max_input_signal;
2906 		min = caps->min_input_signal;
2907 		/*
2908 		 * The brightness input is in the range 0-255.
2909 		 * It needs to be rescaled to be between the
2910 		 * requested min and max input signal.
2911 		 * It also needs to be scaled up by 0x101 to
2912 		 * match the DC interface, which has a range of
2913 		 * 0 to 0xffff.
2914 		 */
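		/*
		 * Worked example (illustrative values only): with min = 12,
		 * max = 255 and user_brightness = 255 this evaluates to
		 * 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 0xffff,
		 * i.e. full scale, while user_brightness = 0 maps to
		 * 12 * 0x101, the scaled minimum input signal.
		 */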
2915 		conversion_pace = 0x101;
2916 		brightness =
2917 			user_brightness
2918 			* conversion_pace
2919 			* (max - min)
2920 			/ AMDGPU_MAX_BL_LEVEL
2921 			+ min * conversion_pace;
2922 	} else {
2923 		/* TODO
2924 		 * We are doing a linear interpolation here, which is OK but
2925 		 * does not provide the optimal result. We probably want
2926 		 * something close to the Perceptual Quantizer (PQ) curve.
2927 		 */
2928 		max = caps->aux_max_input_signal;
2929 		min = caps->aux_min_input_signal;
2930 
2931 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2932 			       + user_brightness * max;
2933 		// Multiply the value by 1000 since we use millinits
2934 		brightness *= 1000;
2935 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2936 	}
2937 
2938 out:
2939 	return brightness;
2940 }
2941 
2942 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2943 {
2944 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2945 	struct amdgpu_dm_backlight_caps caps;
2946 	struct dc_link *link = NULL;
2947 	u32 brightness;
2948 	bool rc;
2949 
2950 	amdgpu_dm_update_backlight_caps(dm);
2951 	caps = dm->backlight_caps;
2952 
2953 	link = (struct dc_link *)dm->backlight_link;
2954 
2955 	brightness = convert_brightness(&caps, bd->props.brightness);
2956 	// Change brightness based on AUX property
2957 	if (caps.aux_support)
2958 		return set_backlight_via_aux(link, brightness);
2959 
2960 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2961 
2962 	return rc ? 0 : 1;
2963 }
2964 
2965 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2966 {
2967 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2968 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2969 
2970 	if (ret == DC_ERROR_UNEXPECTED)
2971 		return bd->props.brightness;
2972 	return ret;
2973 }
2974 
2975 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2976 	.options = BL_CORE_SUSPENDRESUME,
2977 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2978 	.update_status	= amdgpu_dm_backlight_update_status,
2979 };
2980 
2981 static void
2982 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2983 {
2984 	char bl_name[16];
2985 	struct backlight_properties props = { 0 };
2986 
2987 	amdgpu_dm_update_backlight_caps(dm);
2988 
2989 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2990 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2991 	props.type = BACKLIGHT_RAW;
2992 
2993 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2994 			dm->adev->ddev->primary->index);
2995 
2996 	dm->backlight_dev = backlight_device_register(bl_name,
2997 			dm->adev->ddev->dev,
2998 			dm,
2999 			&amdgpu_dm_backlight_ops,
3000 			&props);
3001 
3002 	if (IS_ERR(dm->backlight_dev))
3003 		DRM_ERROR("DM: Backlight registration failed!\n");
3004 	else
3005 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3006 }
3007 
3008 #endif
3009 
3010 static int initialize_plane(struct amdgpu_display_manager *dm,
3011 			    struct amdgpu_mode_info *mode_info, int plane_id,
3012 			    enum drm_plane_type plane_type,
3013 			    const struct dc_plane_cap *plane_cap)
3014 {
3015 	struct drm_plane *plane;
3016 	unsigned long possible_crtcs;
3017 	int ret = 0;
3018 
3019 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3020 	if (!plane) {
3021 		DRM_ERROR("KMS: Failed to allocate plane\n");
3022 		return -ENOMEM;
3023 	}
3024 	plane->type = plane_type;
3025 
3026 	/*
3027 	 * HACK: IGT tests expect that each CRTC's primary plane reports
3028 	 * exactly one possible CRTC. Only expose support for all CRTCs on
3029 	 * planes that are not going to be used as a primary plane for a
3030 	 * CRTC - like overlay or underlay planes.
3031 	 */
3032 	possible_crtcs = 1 << plane_id;
3033 	if (plane_id >= dm->dc->caps.max_streams)
3034 		possible_crtcs = 0xff;
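	/*
	 * Example: plane_id 0 yields possible_crtcs = 0x1 (CRTC 0 only),
	 * while planes past max_streams advertise all CRTCs (0xff).
	 */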
3035 
3036 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3037 
3038 	if (ret) {
3039 		DRM_ERROR("KMS: Failed to initialize plane\n");
3040 		kfree(plane);
3041 		return ret;
3042 	}
3043 
3044 	if (mode_info)
3045 		mode_info->planes[plane_id] = plane;
3046 
3047 	return ret;
3048 }
3049 
3050 
3051 static void register_backlight_device(struct amdgpu_display_manager *dm,
3052 				      struct dc_link *link)
3053 {
3054 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3055 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3056 
3057 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3058 	    link->type != dc_connection_none) {
3059 		/*
3060 		 * Even if registration fails, we should continue with
3061 		 * DM initialization because not having backlight control
3062 		 * is better than a black screen.
3063 		 */
3064 		amdgpu_dm_register_backlight_device(dm);
3065 
3066 		if (dm->backlight_dev)
3067 			dm->backlight_link = link;
3068 	}
3069 #endif
3070 }
3071 
3072 
3073 /*
3074  * In this architecture, the association
3075  * connector -> encoder -> crtc
3076  * is not really required. The crtc and connector will hold the
3077  * display_index as an abstraction to use with the DAL component.
3078  *
3079  * Returns 0 on success
3080  */
3081 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3082 {
3083 	struct amdgpu_display_manager *dm = &adev->dm;
3084 	int32_t i;
3085 	struct amdgpu_dm_connector *aconnector = NULL;
3086 	struct amdgpu_encoder *aencoder = NULL;
3087 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3088 	uint32_t link_cnt;
3089 	int32_t primary_planes;
3090 	enum dc_connection_type new_connection_type = dc_connection_none;
3091 	const struct dc_plane_cap *plane;
3092 
3093 	link_cnt = dm->dc->caps.max_links;
3094 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3095 		DRM_ERROR("DM: Failed to initialize mode config\n");
3096 		return -EINVAL;
3097 	}
3098 
3099 	/* There is one primary plane per CRTC */
3100 	primary_planes = dm->dc->caps.max_streams;
3101 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3102 
3103 	/*
3104 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3105 	 * Order is reversed to match iteration order in atomic check.
3106 	 */
3107 	for (i = (primary_planes - 1); i >= 0; i--) {
3108 		plane = &dm->dc->caps.planes[i];
3109 
3110 		if (initialize_plane(dm, mode_info, i,
3111 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3112 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3113 			goto fail;
3114 		}
3115 	}
3116 
3117 	/*
3118 	 * Initialize overlay planes, index starting after primary planes.
3119 	 * These planes have a higher DRM index than the primary planes since
3120 	 * they should be considered as having a higher z-order.
3121 	 * Order is reversed to match iteration order in atomic check.
3122 	 *
3123 	 * Only support DCN for now, and only expose one so we don't encourage
3124 	 * userspace to use up all the pipes.
3125 	 */
3126 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3127 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3128 
3129 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3130 			continue;
3131 
3132 		if (!plane->blends_with_above || !plane->blends_with_below)
3133 			continue;
3134 
3135 		if (!plane->pixel_format_support.argb8888)
3136 			continue;
3137 
3138 		if (initialize_plane(dm, NULL, primary_planes + i,
3139 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3140 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3141 			goto fail;
3142 		}
3143 
3144 		/* Only create one overlay plane. */
3145 		break;
3146 	}
3147 
3148 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3149 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3150 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3151 			goto fail;
3152 		}
3153 
3154 	dm->display_indexes_num = dm->dc->caps.max_streams;
3155 
3156 	/* loops over all connectors on the board */
3157 	for (i = 0; i < link_cnt; i++) {
3158 		struct dc_link *link = NULL;
3159 
3160 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3161 			DRM_ERROR(
3162 				"KMS: Cannot support more than %d display indexes\n",
3163 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3164 			continue;
3165 		}
3166 
3167 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3168 		if (!aconnector)
3169 			goto fail;
3170 
3171 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3172 		if (!aencoder)
3173 			goto fail;
3174 
3175 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3176 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3177 			goto fail;
3178 		}
3179 
3180 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3181 			DRM_ERROR("KMS: Failed to initialize connector\n");
3182 			goto fail;
3183 		}
3184 
3185 		link = dc_get_link_at_index(dm->dc, i);
3186 
3187 		if (!dc_link_detect_sink(link, &new_connection_type))
3188 			DRM_ERROR("KMS: Failed to detect connector\n");
3189 
3190 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3191 			emulated_link_detect(link);
3192 			amdgpu_dm_update_connector_after_detect(aconnector);
3193 
3194 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3195 			amdgpu_dm_update_connector_after_detect(aconnector);
3196 			register_backlight_device(dm, link);
3197 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3198 				amdgpu_dm_set_psr_caps(link);
3199 		}
3200 
3201 
3202 	}
3203 
3204 	/* Software is initialized. Now we can register interrupt handlers. */
3205 	switch (adev->asic_type) {
3206 	case CHIP_BONAIRE:
3207 	case CHIP_HAWAII:
3208 	case CHIP_KAVERI:
3209 	case CHIP_KABINI:
3210 	case CHIP_MULLINS:
3211 	case CHIP_TONGA:
3212 	case CHIP_FIJI:
3213 	case CHIP_CARRIZO:
3214 	case CHIP_STONEY:
3215 	case CHIP_POLARIS11:
3216 	case CHIP_POLARIS10:
3217 	case CHIP_POLARIS12:
3218 	case CHIP_VEGAM:
3219 	case CHIP_VEGA10:
3220 	case CHIP_VEGA12:
3221 	case CHIP_VEGA20:
3222 		if (dce110_register_irq_handlers(dm->adev)) {
3223 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3224 			goto fail;
3225 		}
3226 		break;
3227 #if defined(CONFIG_DRM_AMD_DC_DCN)
3228 	case CHIP_RAVEN:
3229 	case CHIP_NAVI12:
3230 	case CHIP_NAVI10:
3231 	case CHIP_NAVI14:
3232 	case CHIP_RENOIR:
3233 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3234 	case CHIP_SIENNA_CICHLID:
3235 	case CHIP_NAVY_FLOUNDER:
3236 #endif
3237 		if (dcn10_register_irq_handlers(dm->adev)) {
3238 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3239 			goto fail;
3240 		}
3241 		break;
3242 #endif
3243 	default:
3244 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3245 		goto fail;
3246 	}
3247 
3248 	/* No userspace support. */
3249 	dm->dc->debug.disable_tri_buf = true;
3250 
3251 	return 0;
3252 fail:
3253 	kfree(aencoder);
3254 	kfree(aconnector);
3255 
3256 	return -EINVAL;
3257 }
3258 
3259 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3260 {
3261 	drm_mode_config_cleanup(dm->ddev);
3262 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3263 	return;
3264 }
3265 
3266 /******************************************************************************
3267  * amdgpu_display_funcs functions
3268  *****************************************************************************/
3269 
3270 /*
3271  * dm_bandwidth_update - program display watermarks
3272  *
3273  * @adev: amdgpu_device pointer
3274  *
3275  * Calculate and program the display watermarks and line buffer allocation.
3276  */
3277 static void dm_bandwidth_update(struct amdgpu_device *adev)
3278 {
3279 	/* TODO: implement later */
3280 }
3281 
3282 static const struct amdgpu_display_funcs dm_display_funcs = {
3283 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3284 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3285 	.backlight_set_level = NULL, /* never called for DC */
3286 	.backlight_get_level = NULL, /* never called for DC */
3287 	.hpd_sense = NULL,/* called unconditionally */
3288 	.hpd_set_polarity = NULL, /* called unconditionally */
3289 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3290 	.page_flip_get_scanoutpos =
3291 		dm_crtc_get_scanoutpos,/* called unconditionally */
3292 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3293 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3294 };
3295 
3296 #if defined(CONFIG_DEBUG_KERNEL_DC)
3297 
3298 static ssize_t s3_debug_store(struct device *device,
3299 			      struct device_attribute *attr,
3300 			      const char *buf,
3301 			      size_t count)
3302 {
3303 	int ret;
3304 	int s3_state;
3305 	struct drm_device *drm_dev = dev_get_drvdata(device);
3306 	struct amdgpu_device *adev = drm_dev->dev_private;
3307 
3308 	ret = kstrtoint(buf, 0, &s3_state);
3309 
3310 	if (ret == 0) {
3311 		if (s3_state) {
3312 			dm_resume(adev);
3313 			drm_kms_helper_hotplug_event(adev->ddev);
3314 		} else
3315 			dm_suspend(adev);
3316 	}
3317 
3318 	return ret == 0 ? count : 0;
3319 }
3320 
3321 DEVICE_ATTR_WO(s3_debug);
3322 
3323 #endif
3324 
3325 static int dm_early_init(void *handle)
3326 {
3327 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3328 
3329 	switch (adev->asic_type) {
3330 	case CHIP_BONAIRE:
3331 	case CHIP_HAWAII:
3332 		adev->mode_info.num_crtc = 6;
3333 		adev->mode_info.num_hpd = 6;
3334 		adev->mode_info.num_dig = 6;
3335 		break;
3336 	case CHIP_KAVERI:
3337 		adev->mode_info.num_crtc = 4;
3338 		adev->mode_info.num_hpd = 6;
3339 		adev->mode_info.num_dig = 7;
3340 		break;
3341 	case CHIP_KABINI:
3342 	case CHIP_MULLINS:
3343 		adev->mode_info.num_crtc = 2;
3344 		adev->mode_info.num_hpd = 6;
3345 		adev->mode_info.num_dig = 6;
3346 		break;
3347 	case CHIP_FIJI:
3348 	case CHIP_TONGA:
3349 		adev->mode_info.num_crtc = 6;
3350 		adev->mode_info.num_hpd = 6;
3351 		adev->mode_info.num_dig = 7;
3352 		break;
3353 	case CHIP_CARRIZO:
3354 		adev->mode_info.num_crtc = 3;
3355 		adev->mode_info.num_hpd = 6;
3356 		adev->mode_info.num_dig = 9;
3357 		break;
3358 	case CHIP_STONEY:
3359 		adev->mode_info.num_crtc = 2;
3360 		adev->mode_info.num_hpd = 6;
3361 		adev->mode_info.num_dig = 9;
3362 		break;
3363 	case CHIP_POLARIS11:
3364 	case CHIP_POLARIS12:
3365 		adev->mode_info.num_crtc = 5;
3366 		adev->mode_info.num_hpd = 5;
3367 		adev->mode_info.num_dig = 5;
3368 		break;
3369 	case CHIP_POLARIS10:
3370 	case CHIP_VEGAM:
3371 		adev->mode_info.num_crtc = 6;
3372 		adev->mode_info.num_hpd = 6;
3373 		adev->mode_info.num_dig = 6;
3374 		break;
3375 	case CHIP_VEGA10:
3376 	case CHIP_VEGA12:
3377 	case CHIP_VEGA20:
3378 		adev->mode_info.num_crtc = 6;
3379 		adev->mode_info.num_hpd = 6;
3380 		adev->mode_info.num_dig = 6;
3381 		break;
3382 #if defined(CONFIG_DRM_AMD_DC_DCN)
3383 	case CHIP_RAVEN:
3384 		adev->mode_info.num_crtc = 4;
3385 		adev->mode_info.num_hpd = 4;
3386 		adev->mode_info.num_dig = 4;
3387 		break;
3388 #endif
3389 	case CHIP_NAVI10:
3390 	case CHIP_NAVI12:
3391 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3392 	case CHIP_SIENNA_CICHLID:
3393 	case CHIP_NAVY_FLOUNDER:
3394 #endif
3395 		adev->mode_info.num_crtc = 6;
3396 		adev->mode_info.num_hpd = 6;
3397 		adev->mode_info.num_dig = 6;
3398 		break;
3399 	case CHIP_NAVI14:
3400 		adev->mode_info.num_crtc = 5;
3401 		adev->mode_info.num_hpd = 5;
3402 		adev->mode_info.num_dig = 5;
3403 		break;
3404 	case CHIP_RENOIR:
3405 		adev->mode_info.num_crtc = 4;
3406 		adev->mode_info.num_hpd = 4;
3407 		adev->mode_info.num_dig = 4;
3408 		break;
3409 	default:
3410 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3411 		return -EINVAL;
3412 	}
3413 
3414 	amdgpu_dm_set_irq_funcs(adev);
3415 
3416 	if (adev->mode_info.funcs == NULL)
3417 		adev->mode_info.funcs = &dm_display_funcs;
3418 
3419 	/*
3420 	 * Note: Do NOT change adev->audio_endpt_rreg and
3421 	 * adev->audio_endpt_wreg because they are initialised in
3422 	 * amdgpu_device_init()
3423 	 */
3424 #if defined(CONFIG_DEBUG_KERNEL_DC)
3425 	device_create_file(
3426 		adev->ddev->dev,
3427 		&dev_attr_s3_debug);
3428 #endif
3429 
3430 	return 0;
3431 }
3432 
3433 static bool modeset_required(struct drm_crtc_state *crtc_state,
3434 			     struct dc_stream_state *new_stream,
3435 			     struct dc_stream_state *old_stream)
3436 {
3437 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3438 		return false;
3439 
3440 	if (!crtc_state->enable)
3441 		return false;
3442 
3443 	return crtc_state->active;
3444 }
3445 
3446 static bool modereset_required(struct drm_crtc_state *crtc_state)
3447 {
3448 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3449 		return false;
3450 
3451 	return !crtc_state->enable || !crtc_state->active;
3452 }
3453 
3454 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3455 {
3456 	drm_encoder_cleanup(encoder);
3457 	kfree(encoder);
3458 }
3459 
3460 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3461 	.destroy = amdgpu_dm_encoder_destroy,
3462 };
3463 
3464 
3465 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3466 				struct dc_scaling_info *scaling_info)
3467 {
3468 	int scale_w, scale_h;
3469 
3470 	memset(scaling_info, 0, sizeof(*scaling_info));
3471 
3472 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3473 	scaling_info->src_rect.x = state->src_x >> 16;
3474 	scaling_info->src_rect.y = state->src_y >> 16;
3475 
3476 	scaling_info->src_rect.width = state->src_w >> 16;
3477 	if (scaling_info->src_rect.width == 0)
3478 		return -EINVAL;
3479 
3480 	scaling_info->src_rect.height = state->src_h >> 16;
3481 	if (scaling_info->src_rect.height == 0)
3482 		return -EINVAL;
3483 
3484 	scaling_info->dst_rect.x = state->crtc_x;
3485 	scaling_info->dst_rect.y = state->crtc_y;
3486 
3487 	if (state->crtc_w == 0)
3488 		return -EINVAL;
3489 
3490 	scaling_info->dst_rect.width = state->crtc_w;
3491 
3492 	if (state->crtc_h == 0)
3493 		return -EINVAL;
3494 
3495 	scaling_info->dst_rect.height = state->crtc_h;
3496 
3497 	/* DRM doesn't specify clipping on destination output. */
3498 	scaling_info->clip_rect = scaling_info->dst_rect;
3499 
3500 	/* TODO: Validate scaling per-format with DC plane caps */
3501 	scale_w = scaling_info->dst_rect.width * 1000 /
3502 		  scaling_info->src_rect.width;
3503 
3504 	if (scale_w < 250 || scale_w > 16000)
3505 		return -EINVAL;
3506 
3507 	scale_h = scaling_info->dst_rect.height * 1000 /
3508 		  scaling_info->src_rect.height;
3509 
3510 	if (scale_h < 250 || scale_h > 16000)
3511 		return -EINVAL;
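	/*
	 * Illustrative check of the limits above: the scale factors are in
	 * units of 0.001, so 250 and 16000 bound the ratio to 0.25x-16x.
	 * For example, a 1920-wide source on a 960-wide destination gives
	 * scale_w = 960 * 1000 / 1920 = 500 (a 0.5x downscale) and passes,
	 * while a 100-wide source stretched to 1920 gives 19200 and fails.
	 */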
3512 
3513 	/*
3514 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3515 	 * assume reasonable defaults based on the format.
3516 	 */
3517 
3518 	return 0;
3519 }
3520 
3521 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3522 		       uint64_t *tiling_flags, bool *tmz_surface)
3523 {
3524 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3525 	int r = amdgpu_bo_reserve(rbo, false);
3526 
3527 	if (unlikely(r)) {
3528 		/* Don't show error message when returning -ERESTARTSYS */
3529 		if (r != -ERESTARTSYS)
3530 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3531 		return r;
3532 	}
3533 
3534 	if (tiling_flags)
3535 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3536 
3537 	if (tmz_surface)
3538 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3539 
3540 	amdgpu_bo_unreserve(rbo);
3541 
3542 	return r;
3543 }
3544 
3545 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3546 {
3547 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3548 
3549 	return offset ? (address + offset * 256) : 0;
3550 }
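
/*
 * The DCC offset is encoded in 256-byte units, so, for example, a
 * hypothetical encoded offset of 0x40 places the DCC metadata at
 * address + 0x40 * 256 = address + 16 KiB; an offset of zero means the
 * buffer has no DCC metadata and 0 is returned as a sentinel.
 */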
3551 
3552 static int
3553 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3554 			  const struct amdgpu_framebuffer *afb,
3555 			  const enum surface_pixel_format format,
3556 			  const enum dc_rotation_angle rotation,
3557 			  const struct plane_size *plane_size,
3558 			  const union dc_tiling_info *tiling_info,
3559 			  const uint64_t info,
3560 			  struct dc_plane_dcc_param *dcc,
3561 			  struct dc_plane_address *address,
3562 			  bool force_disable_dcc)
3563 {
3564 	struct dc *dc = adev->dm.dc;
3565 	struct dc_dcc_surface_param input;
3566 	struct dc_surface_dcc_cap output;
3567 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3568 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3569 	uint64_t dcc_address;
3570 
3571 	memset(&input, 0, sizeof(input));
3572 	memset(&output, 0, sizeof(output));
3573 
3574 	if (force_disable_dcc)
3575 		return 0;
3576 
3577 	if (!offset)
3578 		return 0;
3579 
3580 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3581 		return 0;
3582 
3583 	if (!dc->cap_funcs.get_dcc_compression_cap)
3584 		return -EINVAL;
3585 
3586 	input.format = format;
3587 	input.surface_size.width = plane_size->surface_size.width;
3588 	input.surface_size.height = plane_size->surface_size.height;
3589 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3590 
3591 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3592 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3593 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3594 		input.scan = SCAN_DIRECTION_VERTICAL;
3595 
3596 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3597 		return -EINVAL;
3598 
3599 	if (!output.capable)
3600 		return -EINVAL;
3601 
3602 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3603 		return -EINVAL;
3604 
3605 	dcc->enable = 1;
3606 	dcc->meta_pitch =
3607 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3608 	dcc->independent_64b_blks = i64b;
3609 
3610 	dcc_address = get_dcc_address(afb->address, info);
3611 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3612 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3613 
3614 	return 0;
3615 }
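
/*
 * Note the two kinds of early exit above: DCC is silently skipped
 * (return 0 with dcc->enable still 0) when it is force-disabled, when
 * the tiling flags carry no DCC offset, or for video formats; it is
 * only an error when the surface does claim DCC but DC reports the
 * combination incapable or the independent-64B-block constraint is
 * violated.
 */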
3616 
3617 static int
3618 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3619 			     const struct amdgpu_framebuffer *afb,
3620 			     const enum surface_pixel_format format,
3621 			     const enum dc_rotation_angle rotation,
3622 			     const uint64_t tiling_flags,
3623 			     union dc_tiling_info *tiling_info,
3624 			     struct plane_size *plane_size,
3625 			     struct dc_plane_dcc_param *dcc,
3626 			     struct dc_plane_address *address,
3627 			     bool tmz_surface,
3628 			     bool force_disable_dcc)
3629 {
3630 	const struct drm_framebuffer *fb = &afb->base;
3631 	int ret;
3632 
3633 	memset(tiling_info, 0, sizeof(*tiling_info));
3634 	memset(plane_size, 0, sizeof(*plane_size));
3635 	memset(dcc, 0, sizeof(*dcc));
3636 	memset(address, 0, sizeof(*address));
3637 
3638 	address->tmz_surface = tmz_surface;
3639 
3640 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3641 		plane_size->surface_size.x = 0;
3642 		plane_size->surface_size.y = 0;
3643 		plane_size->surface_size.width = fb->width;
3644 		plane_size->surface_size.height = fb->height;
3645 		plane_size->surface_pitch =
3646 			fb->pitches[0] / fb->format->cpp[0];
3647 
3648 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3649 		address->grph.addr.low_part = lower_32_bits(afb->address);
3650 		address->grph.addr.high_part = upper_32_bits(afb->address);
3651 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3652 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3653 
3654 		plane_size->surface_size.x = 0;
3655 		plane_size->surface_size.y = 0;
3656 		plane_size->surface_size.width = fb->width;
3657 		plane_size->surface_size.height = fb->height;
3658 		plane_size->surface_pitch =
3659 			fb->pitches[0] / fb->format->cpp[0];
3660 
3661 		plane_size->chroma_size.x = 0;
3662 		plane_size->chroma_size.y = 0;
3663 		/* TODO: set these based on surface format */
3664 		plane_size->chroma_size.width = fb->width / 2;
3665 		plane_size->chroma_size.height = fb->height / 2;
3666 
3667 		plane_size->chroma_pitch =
3668 			fb->pitches[1] / fb->format->cpp[1];
3669 
3670 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3671 		address->video_progressive.luma_addr.low_part =
3672 			lower_32_bits(afb->address);
3673 		address->video_progressive.luma_addr.high_part =
3674 			upper_32_bits(afb->address);
3675 		address->video_progressive.chroma_addr.low_part =
3676 			lower_32_bits(chroma_addr);
3677 		address->video_progressive.chroma_addr.high_part =
3678 			upper_32_bits(chroma_addr);
3679 	}
3680 
3681 	/* Fill GFX8 params */
3682 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3683 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3684 
3685 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3686 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3687 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3688 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3689 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3690 
3691 		/* XXX fix me for VI */
3692 		tiling_info->gfx8.num_banks = num_banks;
3693 		tiling_info->gfx8.array_mode =
3694 				DC_ARRAY_2D_TILED_THIN1;
3695 		tiling_info->gfx8.tile_split = tile_split;
3696 		tiling_info->gfx8.bank_width = bankw;
3697 		tiling_info->gfx8.bank_height = bankh;
3698 		tiling_info->gfx8.tile_aspect = mtaspect;
3699 		tiling_info->gfx8.tile_mode =
3700 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3701 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3702 			== DC_ARRAY_1D_TILED_THIN1) {
3703 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3704 	}
3705 
3706 	tiling_info->gfx8.pipe_config =
3707 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3708 
3709 	if (adev->asic_type == CHIP_VEGA10 ||
3710 	    adev->asic_type == CHIP_VEGA12 ||
3711 	    adev->asic_type == CHIP_VEGA20 ||
3712 	    adev->asic_type == CHIP_NAVI10 ||
3713 	    adev->asic_type == CHIP_NAVI14 ||
3714 	    adev->asic_type == CHIP_NAVI12 ||
3715 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3716 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3717 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3718 #endif
3719 	    adev->asic_type == CHIP_RENOIR ||
3720 	    adev->asic_type == CHIP_RAVEN) {
3721 		/* Fill GFX9 params */
3722 		tiling_info->gfx9.num_pipes =
3723 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3724 		tiling_info->gfx9.num_banks =
3725 			adev->gfx.config.gb_addr_config_fields.num_banks;
3726 		tiling_info->gfx9.pipe_interleave =
3727 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3728 		tiling_info->gfx9.num_shader_engines =
3729 			adev->gfx.config.gb_addr_config_fields.num_se;
3730 		tiling_info->gfx9.max_compressed_frags =
3731 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3732 		tiling_info->gfx9.num_rb_per_se =
3733 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3734 		tiling_info->gfx9.swizzle =
3735 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3736 		tiling_info->gfx9.shaderEnable = 1;
3737 
3738 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3739 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3740 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3741 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3742 #endif
3743 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3744 						plane_size, tiling_info,
3745 						tiling_flags, dcc, address,
3746 						force_disable_dcc);
3747 		if (ret)
3748 			return ret;
3749 	}
3750 
3751 	return 0;
3752 }
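
/*
 * Summary of the two tiling paths above: GFX8-and-earlier parts are
 * described per-surface (array mode, bank geometry and tile split taken
 * from the BO's tiling flags), while the listed GFX9+ ASICs take a
 * single swizzle mode plus pipe/bank topology derived from
 * gb_addr_config; DCC attributes are only filled on the GFX9+ path.
 */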
3753 
3754 static void
3755 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3756 			       bool *per_pixel_alpha, bool *global_alpha,
3757 			       int *global_alpha_value)
3758 {
3759 	*per_pixel_alpha = false;
3760 	*global_alpha = false;
3761 	*global_alpha_value = 0xff;
3762 
3763 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3764 		return;
3765 
3766 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3767 		static const uint32_t alpha_formats[] = {
3768 			DRM_FORMAT_ARGB8888,
3769 			DRM_FORMAT_RGBA8888,
3770 			DRM_FORMAT_ABGR8888,
3771 		};
3772 		uint32_t format = plane_state->fb->format->format;
3773 		unsigned int i;
3774 
3775 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3776 			if (format == alpha_formats[i]) {
3777 				*per_pixel_alpha = true;
3778 				break;
3779 			}
3780 		}
3781 	}
3782 
3783 	if (plane_state->alpha < 0xffff) {
3784 		*global_alpha = true;
3785 		*global_alpha_value = plane_state->alpha >> 8;
3786 	}
3787 }
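
/*
 * plane_state->alpha is DRM's 16-bit "alpha" property while DC takes an
 * 8-bit global alpha, hence the >> 8 above; for example, a property
 * value of 0x8000 becomes 0x80 (roughly 50%). Per-pixel alpha is only
 * honoured for premultiplied blending on the alpha-capable 8888
 * formats listed above.
 */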
3788 
3789 static int
3790 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3791 			    const enum surface_pixel_format format,
3792 			    enum dc_color_space *color_space)
3793 {
3794 	bool full_range;
3795 
3796 	*color_space = COLOR_SPACE_SRGB;
3797 
3798 	/* DRM color properties only affect non-RGB formats. */
3799 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3800 		return 0;
3801 
3802 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3803 
3804 	switch (plane_state->color_encoding) {
3805 	case DRM_COLOR_YCBCR_BT601:
3806 		if (full_range)
3807 			*color_space = COLOR_SPACE_YCBCR601;
3808 		else
3809 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3810 		break;
3811 
3812 	case DRM_COLOR_YCBCR_BT709:
3813 		if (full_range)
3814 			*color_space = COLOR_SPACE_YCBCR709;
3815 		else
3816 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3817 		break;
3818 
3819 	case DRM_COLOR_YCBCR_BT2020:
3820 		if (full_range)
3821 			*color_space = COLOR_SPACE_2020_YCBCR;
3822 		else
3823 			return -EINVAL;
3824 		break;
3825 
3826 	default:
3827 		return -EINVAL;
3828 	}
3829 
3830 	return 0;
3831 }
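
/*
 * The mapping above, in table form (RGB formats always report
 * COLOR_SPACE_SRGB; the DRM colour properties only apply to YCbCr):
 *
 *   encoding \ range    full            limited
 *   BT.601              YCBCR601        YCBCR601_LIMITED
 *   BT.709              YCBCR709        YCBCR709_LIMITED
 *   BT.2020             2020_YCBCR      -EINVAL (unsupported)
 */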
3832 
3833 static int
3834 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3835 			    const struct drm_plane_state *plane_state,
3836 			    const uint64_t tiling_flags,
3837 			    struct dc_plane_info *plane_info,
3838 			    struct dc_plane_address *address,
3839 			    bool tmz_surface,
3840 			    bool force_disable_dcc)
3841 {
3842 	const struct drm_framebuffer *fb = plane_state->fb;
3843 	const struct amdgpu_framebuffer *afb =
3844 		to_amdgpu_framebuffer(plane_state->fb);
3845 	struct drm_format_name_buf format_name;
3846 	int ret;
3847 
3848 	memset(plane_info, 0, sizeof(*plane_info));
3849 
3850 	switch (fb->format->format) {
3851 	case DRM_FORMAT_C8:
3852 		plane_info->format =
3853 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3854 		break;
3855 	case DRM_FORMAT_RGB565:
3856 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3857 		break;
3858 	case DRM_FORMAT_XRGB8888:
3859 	case DRM_FORMAT_ARGB8888:
3860 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3861 		break;
3862 	case DRM_FORMAT_XRGB2101010:
3863 	case DRM_FORMAT_ARGB2101010:
3864 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3865 		break;
3866 	case DRM_FORMAT_XBGR2101010:
3867 	case DRM_FORMAT_ABGR2101010:
3868 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3869 		break;
3870 	case DRM_FORMAT_XBGR8888:
3871 	case DRM_FORMAT_ABGR8888:
3872 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3873 		break;
3874 	case DRM_FORMAT_NV21:
3875 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3876 		break;
3877 	case DRM_FORMAT_NV12:
3878 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3879 		break;
3880 	case DRM_FORMAT_P010:
3881 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3882 		break;
3883 	case DRM_FORMAT_XRGB16161616F:
3884 	case DRM_FORMAT_ARGB16161616F:
3885 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3886 		break;
3887 	case DRM_FORMAT_XBGR16161616F:
3888 	case DRM_FORMAT_ABGR16161616F:
3889 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3890 		break;
3891 	default:
3892 		DRM_ERROR(
3893 			"Unsupported screen format %s\n",
3894 			drm_get_format_name(fb->format->format, &format_name));
3895 		return -EINVAL;
3896 	}
3897 
3898 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3899 	case DRM_MODE_ROTATE_0:
3900 		plane_info->rotation = ROTATION_ANGLE_0;
3901 		break;
3902 	case DRM_MODE_ROTATE_90:
3903 		plane_info->rotation = ROTATION_ANGLE_90;
3904 		break;
3905 	case DRM_MODE_ROTATE_180:
3906 		plane_info->rotation = ROTATION_ANGLE_180;
3907 		break;
3908 	case DRM_MODE_ROTATE_270:
3909 		plane_info->rotation = ROTATION_ANGLE_270;
3910 		break;
3911 	default:
3912 		plane_info->rotation = ROTATION_ANGLE_0;
3913 		break;
3914 	}
3915 
3916 	plane_info->visible = true;
3917 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3918 
3919 	plane_info->layer_index = 0;
3920 
3921 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3922 					  &plane_info->color_space);
3923 	if (ret)
3924 		return ret;
3925 
3926 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3927 					   plane_info->rotation, tiling_flags,
3928 					   &plane_info->tiling_info,
3929 					   &plane_info->plane_size,
3930 					   &plane_info->dcc, address, tmz_surface,
3931 					   force_disable_dcc);
3932 	if (ret)
3933 		return ret;
3934 
3935 	fill_blending_from_plane_state(
3936 		plane_state, &plane_info->per_pixel_alpha,
3937 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3938 
3939 	return 0;
3940 }
3941 
3942 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3943 				    struct dc_plane_state *dc_plane_state,
3944 				    struct drm_plane_state *plane_state,
3945 				    struct drm_crtc_state *crtc_state)
3946 {
3947 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3948 	const struct amdgpu_framebuffer *amdgpu_fb =
3949 		to_amdgpu_framebuffer(plane_state->fb);
3950 	struct dc_scaling_info scaling_info;
3951 	struct dc_plane_info plane_info;
3952 	uint64_t tiling_flags;
3953 	int ret;
3954 	bool tmz_surface = false;
3955 	bool force_disable_dcc = false;
3956 
3957 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3958 	if (ret)
3959 		return ret;
3960 
3961 	dc_plane_state->src_rect = scaling_info.src_rect;
3962 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3963 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3964 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3965 
3966 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3967 	if (ret)
3968 		return ret;
3969 
3970 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3971 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3972 					  &plane_info,
3973 					  &dc_plane_state->address,
3974 					  tmz_surface,
3975 					  force_disable_dcc);
3976 	if (ret)
3977 		return ret;
3978 
3979 	dc_plane_state->format = plane_info.format;
3980 	dc_plane_state->color_space = plane_info.color_space;
3982 	dc_plane_state->plane_size = plane_info.plane_size;
3983 	dc_plane_state->rotation = plane_info.rotation;
3984 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3985 	dc_plane_state->stereo_format = plane_info.stereo_format;
3986 	dc_plane_state->tiling_info = plane_info.tiling_info;
3987 	dc_plane_state->visible = plane_info.visible;
3988 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3989 	dc_plane_state->global_alpha = plane_info.global_alpha;
3990 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3991 	dc_plane_state->dcc = plane_info.dcc;
3992 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
3993 
3994 	/*
3995 	 * Always set input transfer function, since plane state is refreshed
3996 	 * every time.
3997 	 */
3998 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3999 	if (ret)
4000 		return ret;
4001 
4002 	return 0;
4003 }
4004 
4005 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4006 					   const struct dm_connector_state *dm_state,
4007 					   struct dc_stream_state *stream)
4008 {
4009 	enum amdgpu_rmx_type rmx_type;
4010 
4011 	struct rect src = { 0 }; /* viewport in composition space */
4012 	struct rect dst = { 0 }; /* stream addressable area */
4013 
4014 	/* no mode. nothing to be done */
4015 	if (!mode)
4016 		return;
4017 
4018 	/* Full screen scaling by default */
4019 	src.width = mode->hdisplay;
4020 	src.height = mode->vdisplay;
4021 	dst.width = stream->timing.h_addressable;
4022 	dst.height = stream->timing.v_addressable;
4023 
4024 	if (dm_state) {
4025 		rmx_type = dm_state->scaling;
4026 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4027 			if (src.width * dst.height <
4028 					src.height * dst.width) {
4029 				/* height needs less upscaling/more downscaling */
4030 				dst.width = src.width *
4031 						dst.height / src.height;
4032 			} else {
4033 				/* width needs less upscaling/more downscaling */
4034 				dst.height = src.height *
4035 						dst.width / src.width;
4036 			}
4037 		} else if (rmx_type == RMX_CENTER) {
4038 			dst = src;
4039 		}
4040 
4041 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4042 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4043 
4044 		if (dm_state->underscan_enable) {
4045 			dst.x += dm_state->underscan_hborder / 2;
4046 			dst.y += dm_state->underscan_vborder / 2;
4047 			dst.width -= dm_state->underscan_hborder;
4048 			dst.height -= dm_state->underscan_vborder;
4049 		}
4050 	}
4051 
4052 	stream->src = src;
4053 	stream->dst = dst;
4054 
4055 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4056 			dst.x, dst.y, dst.width, dst.height);
4057 
4058 }
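
/*
 * A worked RMX_ASPECT example (hypothetical values): a 1280x1024 (5:4)
 * source on a 1920x1080 (16:9) timing has
 * src.width * dst.height = 1382400 < src.height * dst.width = 1966080,
 * so the width is reduced:
 *
 *   dst.width = 1280 * 1080 / 1024 = 1350
 *   dst.x     = (1920 - 1350) / 2  = 285
 *
 * which centres the image with pillarboxing on either side.
 */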
4059 
4060 static enum dc_color_depth
4061 convert_color_depth_from_display_info(const struct drm_connector *connector,
4062 				      bool is_y420, int requested_bpc)
4063 {
4064 	uint8_t bpc;
4065 
4066 	if (is_y420) {
4067 		bpc = 8;
4068 
4069 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4070 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4071 			bpc = 16;
4072 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4073 			bpc = 12;
4074 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4075 			bpc = 10;
4076 	} else {
4077 		bpc = (uint8_t)connector->display_info.bpc;
4078 		/* Assume 8 bpc by default if no bpc is specified. */
4079 		bpc = bpc ? bpc : 8;
4080 	}
4081 
4082 	if (requested_bpc > 0) {
4083 		/*
4084 		 * Cap display bpc based on the user requested value.
4085 		 *
4086 		 * The value for state->max_bpc may not be correctly updated
4087 		 * depending on when the connector gets added to the state
4088 		 * or if this was called outside of atomic check, so it
4089 		 * can't be used directly.
4090 		 */
4091 		bpc = min_t(u8, bpc, requested_bpc);
4092 
4093 		/* Round down to the nearest even number. */
4094 		bpc = bpc - (bpc & 1);
4095 	}
4096 
4097 	switch (bpc) {
4098 	case 0:
4099 		/*
4100 		 * Temporary workaround: DRM doesn't parse color depth for
4101 		 * EDID revisions before 1.4.
4102 		 * TODO: Fix EDID parsing
4103 		 */
4104 		return COLOR_DEPTH_888;
4105 	case 6:
4106 		return COLOR_DEPTH_666;
4107 	case 8:
4108 		return COLOR_DEPTH_888;
4109 	case 10:
4110 		return COLOR_DEPTH_101010;
4111 	case 12:
4112 		return COLOR_DEPTH_121212;
4113 	case 14:
4114 		return COLOR_DEPTH_141414;
4115 	case 16:
4116 		return COLOR_DEPTH_161616;
4117 	default:
4118 		return COLOR_DEPTH_UNDEFINED;
4119 	}
4120 }
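
/*
 * For example, a sink reporting 12 bpc with the connector's max bpc
 * property set to 11 yields min(12, 11) = 11, rounded down to the even
 * value 10, i.e. COLOR_DEPTH_101010; a reported bpc of 0 (pre-1.4
 * EDIDs) falls back to COLOR_DEPTH_888.
 */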
4121 
4122 static enum dc_aspect_ratio
4123 get_aspect_ratio(const struct drm_display_mode *mode_in)
4124 {
4125 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4126 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4127 }
4128 
4129 static enum dc_color_space
4130 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4131 {
4132 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4133 
4134 	switch (dc_crtc_timing->pixel_encoding)	{
4135 	case PIXEL_ENCODING_YCBCR422:
4136 	case PIXEL_ENCODING_YCBCR444:
4137 	case PIXEL_ENCODING_YCBCR420:
4138 	{
4139 		/*
4140 		 * 27030 kHz (27.03 MHz) is the separation point between HDTV and
4141 		 * SDTV according to the HDMI spec; we use YCbCr709 and YCbCr601
4142 		 * respectively.
4143 		 */
4144 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4145 			if (dc_crtc_timing->flags.Y_ONLY)
4146 				color_space =
4147 					COLOR_SPACE_YCBCR709_LIMITED;
4148 			else
4149 				color_space = COLOR_SPACE_YCBCR709;
4150 		} else {
4151 			if (dc_crtc_timing->flags.Y_ONLY)
4152 				color_space =
4153 					COLOR_SPACE_YCBCR601_LIMITED;
4154 			else
4155 				color_space = COLOR_SPACE_YCBCR601;
4156 		}
4157 
4158 	}
4159 	break;
4160 	case PIXEL_ENCODING_RGB:
4161 		color_space = COLOR_SPACE_SRGB;
4162 		break;
4163 
4164 	default:
4165 		WARN_ON(1);
4166 		break;
4167 	}
4168 
4169 	return color_space;
4170 }
4171 
4172 static bool adjust_colour_depth_from_display_info(
4173 	struct dc_crtc_timing *timing_out,
4174 	const struct drm_display_info *info)
4175 {
4176 	enum dc_color_depth depth = timing_out->display_color_depth;
4177 	int normalized_clk;
4178 	do {
4179 		normalized_clk = timing_out->pix_clk_100hz / 10;
4180 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4181 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4182 			normalized_clk /= 2;
4183 		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
4184 		switch (depth) {
4185 		case COLOR_DEPTH_888:
4186 			break;
4187 		case COLOR_DEPTH_101010:
4188 			normalized_clk = (normalized_clk * 30) / 24;
4189 			break;
4190 		case COLOR_DEPTH_121212:
4191 			normalized_clk = (normalized_clk * 36) / 24;
4192 			break;
4193 		case COLOR_DEPTH_161616:
4194 			normalized_clk = (normalized_clk * 48) / 24;
4195 			break;
4196 		default:
4197 			/* The above depths are the only ones valid for HDMI. */
4198 			return false;
4199 		}
4200 		if (normalized_clk <= info->max_tmds_clock) {
4201 			timing_out->display_color_depth = depth;
4202 			return true;
4203 		}
4204 	} while (--depth > COLOR_DEPTH_666);
4205 	return false;
4206 }
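
/*
 * A worked example, assuming a 600000 kHz (HDMI 2.0) max_tmds_clock:
 * 4K60 RGB has pix_clk_100hz = 5940000, so normalized_clk = 594000.
 * At 10 bpc that becomes 594000 * 30 / 24 = 742500, which exceeds the
 * limit, so the loop steps down to the next lower depth, 8 bpc
 * (COLOR_DEPTH_888), which fits at exactly 594000. YCbCr 4:2:0 would
 * halve the clock before the comparison.
 */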
4207 
4208 static void fill_stream_properties_from_drm_display_mode(
4209 	struct dc_stream_state *stream,
4210 	const struct drm_display_mode *mode_in,
4211 	const struct drm_connector *connector,
4212 	const struct drm_connector_state *connector_state,
4213 	const struct dc_stream_state *old_stream,
4214 	int requested_bpc)
4215 {
4216 	struct dc_crtc_timing *timing_out = &stream->timing;
4217 	const struct drm_display_info *info = &connector->display_info;
4218 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4219 	struct hdmi_vendor_infoframe hv_frame;
4220 	struct hdmi_avi_infoframe avi_frame;
4221 
4222 	memset(&hv_frame, 0, sizeof(hv_frame));
4223 	memset(&avi_frame, 0, sizeof(avi_frame));
4224 
4225 	timing_out->h_border_left = 0;
4226 	timing_out->h_border_right = 0;
4227 	timing_out->v_border_top = 0;
4228 	timing_out->v_border_bottom = 0;
4229 	/* TODO: un-hardcode */
4230 	if (drm_mode_is_420_only(info, mode_in)
4231 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4232 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4233 	else if (drm_mode_is_420_also(info, mode_in)
4234 			&& aconnector->force_yuv420_output)
4235 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4236 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4237 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4238 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4239 	else
4240 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4241 
4242 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4243 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4244 		connector,
4245 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4246 		requested_bpc);
4247 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4248 	timing_out->hdmi_vic = 0;
4249 
4250 	if (old_stream) {
4251 		timing_out->vic = old_stream->timing.vic;
4252 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4253 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4254 	} else {
4255 		timing_out->vic = drm_match_cea_mode(mode_in);
4256 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4257 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4258 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4259 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4260 	}
4261 
4262 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4263 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4264 		timing_out->vic = avi_frame.video_code;
4265 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4266 		timing_out->hdmi_vic = hv_frame.vic;
4267 	}
4268 
4269 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4270 	timing_out->h_total = mode_in->crtc_htotal;
4271 	timing_out->h_sync_width =
4272 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4273 	timing_out->h_front_porch =
4274 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4275 	timing_out->v_total = mode_in->crtc_vtotal;
4276 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4277 	timing_out->v_front_porch =
4278 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4279 	timing_out->v_sync_width =
4280 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4281 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4282 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4283 
4284 	stream->output_color_space = get_output_color_space(timing_out);
4285 
4286 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4287 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4288 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4289 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4290 		    drm_mode_is_420_also(info, mode_in) &&
4291 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4292 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4293 			adjust_colour_depth_from_display_info(timing_out, info);
4294 		}
4295 	}
4296 }
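
/*
 * The porch/sync arithmetic above follows the standard DRM timing
 * layout, e.g. horizontally:
 *
 *   h_front_porch = crtc_hsync_start - crtc_hdisplay
 *   h_sync_width  = crtc_hsync_end   - crtc_hsync_start
 *
 * with everything taken from the crtc_* (hardware) copies of the mode,
 * and pix_clk_100hz = crtc_clock (in kHz) * 10.
 */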
4297 
4298 static void fill_audio_info(struct audio_info *audio_info,
4299 			    const struct drm_connector *drm_connector,
4300 			    const struct dc_sink *dc_sink)
4301 {
4302 	int i = 0;
4303 	int cea_revision = 0;
4304 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4305 
4306 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4307 	audio_info->product_id = edid_caps->product_id;
4308 
4309 	cea_revision = drm_connector->display_info.cea_rev;
4310 
4311 	strscpy(audio_info->display_name,
4312 		edid_caps->display_name,
4313 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4314 
4315 	if (cea_revision >= 3) {
4316 		audio_info->mode_count = edid_caps->audio_mode_count;
4317 
4318 		for (i = 0; i < audio_info->mode_count; ++i) {
4319 			audio_info->modes[i].format_code =
4320 					(enum audio_format_code)
4321 					(edid_caps->audio_modes[i].format_code);
4322 			audio_info->modes[i].channel_count =
4323 					edid_caps->audio_modes[i].channel_count;
4324 			audio_info->modes[i].sample_rates.all =
4325 					edid_caps->audio_modes[i].sample_rate;
4326 			audio_info->modes[i].sample_size =
4327 					edid_caps->audio_modes[i].sample_size;
4328 		}
4329 	}
4330 
4331 	audio_info->flags.all = edid_caps->speaker_flags;
4332 
4333 	/* TODO: We only check the progressive mode; check the interlaced mode too. */
4334 	if (drm_connector->latency_present[0]) {
4335 		audio_info->video_latency = drm_connector->video_latency[0];
4336 		audio_info->audio_latency = drm_connector->audio_latency[0];
4337 	}
4338 
4339 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4340 
4341 }
4342 
4343 static void
4344 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4345 				      struct drm_display_mode *dst_mode)
4346 {
4347 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4348 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4349 	dst_mode->crtc_clock = src_mode->crtc_clock;
4350 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4351 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4352 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4353 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4354 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4355 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4356 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4357 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4358 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4359 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4360 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4361 }
4362 
4363 static void
4364 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4365 					const struct drm_display_mode *native_mode,
4366 					bool scale_enabled)
4367 {
4368 	if (scale_enabled) {
4369 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4370 	} else if (native_mode->clock == drm_mode->clock &&
4371 			native_mode->htotal == drm_mode->htotal &&
4372 			native_mode->vtotal == drm_mode->vtotal) {
4373 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4374 	} else {
4375 		/* No scaling and no amdgpu-inserted mode: nothing to patch. */
4376 	}
4377 }
4378 
4379 static struct dc_sink *
4380 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4381 {
4382 	struct dc_sink_init_data sink_init_data = { 0 };
4383 	struct dc_sink *sink = NULL;
4384 	sink_init_data.link = aconnector->dc_link;
4385 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4386 
4387 	sink = dc_sink_create(&sink_init_data);
4388 	if (!sink) {
4389 		DRM_ERROR("Failed to create sink!\n");
4390 		return NULL;
4391 	}
4392 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4393 
4394 	return sink;
4395 }
4396 
4397 static void set_multisync_trigger_params(
4398 		struct dc_stream_state *stream)
4399 {
4400 	if (stream->triggered_crtc_reset.enabled) {
4401 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4402 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4403 	}
4404 }
4405 
4406 static void set_master_stream(struct dc_stream_state *stream_set[],
4407 			      int stream_count)
4408 {
4409 	int j, highest_rfr = 0, master_stream = 0;
4410 
4411 	for (j = 0;  j < stream_count; j++) {
4412 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4413 			int refresh_rate = 0;
4414 
4415 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4416 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4417 			if (refresh_rate > highest_rfr) {
4418 				highest_rfr = refresh_rate;
4419 				master_stream = j;
4420 			}
4421 		}
4422 	}
4423 	for (j = 0;  j < stream_count; j++) {
4424 		if (stream_set[j])
4425 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4426 	}
4427 }
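
/*
 * The refresh rate computed above is pix_clk_100hz * 100 /
 * (h_total * v_total), in Hz; for example, a 1080p timing with
 * pix_clk_100hz = 1485000 and 2200 x 1125 total pixels gives
 * 148500000 / 2475000 = 60 Hz. The stream with the highest rate becomes
 * the trigger source for every reset-enabled stream.
 */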
4428 
4429 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4430 {
4431 	int i = 0;
4432 
4433 	if (context->stream_count < 2)
4434 		return;
4435 	for (i = 0; i < context->stream_count ; i++) {
4436 		if (!context->streams[i])
4437 			continue;
4438 		/*
4439 		 * TODO: add a function to read AMD VSDB bits and set
4440 		 * crtc_sync_master.multi_sync_enabled flag
4441 		 * For now it's set to false
4442 		 */
4443 		set_multisync_trigger_params(context->streams[i]);
4444 	}
4445 	set_master_stream(context->streams, context->stream_count);
4446 }
4447 
4448 static struct dc_stream_state *
4449 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4450 		       const struct drm_display_mode *drm_mode,
4451 		       const struct dm_connector_state *dm_state,
4452 		       const struct dc_stream_state *old_stream,
4453 		       int requested_bpc)
4454 {
4455 	struct drm_display_mode *preferred_mode = NULL;
4456 	struct drm_connector *drm_connector;
4457 	const struct drm_connector_state *con_state =
4458 		dm_state ? &dm_state->base : NULL;
4459 	struct dc_stream_state *stream = NULL;
4460 	struct drm_display_mode mode = *drm_mode;
4461 	bool native_mode_found = false;
4462 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4463 	int mode_refresh;
4464 	int preferred_refresh = 0;
4465 #if defined(CONFIG_DRM_AMD_DC_DCN)
4466 	struct dsc_dec_dpcd_caps dsc_caps;
4467 #endif
4468 	uint32_t link_bandwidth_kbps;
4469 
4470 	struct dc_sink *sink = NULL;
4471 	if (aconnector == NULL) {
4472 		DRM_ERROR("aconnector is NULL!\n");
4473 		return stream;
4474 	}
4475 
4476 	drm_connector = &aconnector->base;
4477 
4478 	if (!aconnector->dc_sink) {
4479 		sink = create_fake_sink(aconnector);
4480 		if (!sink)
4481 			return stream;
4482 	} else {
4483 		sink = aconnector->dc_sink;
4484 		dc_sink_retain(sink);
4485 	}
4486 
4487 	stream = dc_create_stream_for_sink(sink);
4488 
4489 	if (stream == NULL) {
4490 		DRM_ERROR("Failed to create stream for sink!\n");
4491 		goto finish;
4492 	}
4493 
4494 	stream->dm_stream_context = aconnector;
4495 
4496 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4497 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4498 
4499 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4500 		/* Search for preferred mode */
4501 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4502 			native_mode_found = true;
4503 			break;
4504 		}
4505 	}
4506 	if (!native_mode_found)
4507 		preferred_mode = list_first_entry_or_null(
4508 				&aconnector->base.modes,
4509 				struct drm_display_mode,
4510 				head);
4511 
4512 	mode_refresh = drm_mode_vrefresh(&mode);
4513 
4514 	if (preferred_mode == NULL) {
4515 		/*
4516 		 * This may not be an error: the use case is when we have no
4517 		 * usermode calls to reset and set the mode upon hotplug. In this
4518 		 * case, we call set mode ourselves to restore the previous mode,
4519 		 * and the mode list may not be filled in time.
4520 		 */
4521 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4522 	} else {
4523 		decide_crtc_timing_for_drm_display_mode(
4524 				&mode, preferred_mode,
4525 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4526 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4527 	}
4528 
4529 	if (!dm_state)
4530 		drm_mode_set_crtcinfo(&mode, 0);
4531 
4532 	/*
4533 	 * If scaling is enabled and the refresh rate didn't change,
4534 	 * we copy the VIC and polarities of the old timings.
4535 	 */
4536 	if (!scale || mode_refresh != preferred_refresh)
4537 		fill_stream_properties_from_drm_display_mode(stream,
4538 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4539 	else
4540 		fill_stream_properties_from_drm_display_mode(stream,
4541 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4542 
4543 	stream->timing.flags.DSC = 0;
4544 
4545 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4546 #if defined(CONFIG_DRM_AMD_DC_DCN)
4547 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4548 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4549 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4550 				      &dsc_caps);
4551 #endif
4552 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4553 							     dc_link_get_link_cap(aconnector->dc_link));
4554 
4555 #if defined(CONFIG_DRM_AMD_DC_DCN)
4556 		if (dsc_caps.is_dsc_supported)
4557 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4558 						  &dsc_caps,
4559 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4560 						  link_bandwidth_kbps,
4561 						  &stream->timing,
4562 						  &stream->timing.dsc_cfg))
4563 				stream->timing.flags.DSC = 1;
4564 #endif
4565 	}
4566 
4567 	update_stream_scaling_settings(&mode, dm_state, stream);
4568 
4569 	fill_audio_info(
4570 		&stream->audio_info,
4571 		drm_connector,
4572 		sink);
4573 
4574 	update_stream_signal(stream, sink);
4575 
4576 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4577 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4578 	if (stream->link->psr_settings.psr_feature_enabled) {
4579 		/*
4580 		 * Decide whether the stream supports VSC SDP colorimetry
4581 		 * before building the VSC info packet.
4582 		 */
4583 		stream->use_vsc_sdp_for_colorimetry = false;
4584 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4585 			stream->use_vsc_sdp_for_colorimetry =
4586 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4587 		} else {
4588 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4589 				stream->use_vsc_sdp_for_colorimetry = true;
4590 		}
4591 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4592 	}
4593 finish:
4594 	dc_sink_release(sink);
4595 
4596 	return stream;
4597 }
4598 
4599 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4600 {
4601 	drm_crtc_cleanup(crtc);
4602 	kfree(crtc);
4603 }
4604 
4605 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4606 				  struct drm_crtc_state *state)
4607 {
4608 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4609 
4610 	/* TODO: Destroy dc_stream objects once the stream object is flattened. */
4611 	if (cur->stream)
4612 		dc_stream_release(cur->stream);
4613 
4614 
4615 	__drm_atomic_helper_crtc_destroy_state(state);
4616 
4617 
4618 	kfree(state);
4619 }
4620 
4621 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4622 {
4623 	struct dm_crtc_state *state;
4624 
4625 	if (crtc->state)
4626 		dm_crtc_destroy_state(crtc, crtc->state);
4627 
4628 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4629 	if (WARN_ON(!state))
4630 		return;
4631 
4632 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4633 }
4634 
4635 static struct drm_crtc_state *
4636 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4637 {
4638 	struct dm_crtc_state *state, *cur;
4639 
4640 	if (WARN_ON(!crtc->state))
4641 		return NULL;
4642 
4643 	cur = to_dm_crtc_state(crtc->state);
4644 
4645 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4646 	if (!state)
4647 		return NULL;
4648 
4649 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4650 
4651 	if (cur->stream) {
4652 		state->stream = cur->stream;
4653 		dc_stream_retain(state->stream);
4654 	}
4655 
4656 	state->active_planes = cur->active_planes;
4657 	state->vrr_params = cur->vrr_params;
4658 	state->vrr_infopacket = cur->vrr_infopacket;
4659 	state->abm_level = cur->abm_level;
4660 	state->vrr_supported = cur->vrr_supported;
4661 	state->freesync_config = cur->freesync_config;
4662 	state->crc_src = cur->crc_src;
4663 	state->cm_has_degamma = cur->cm_has_degamma;
4664 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4665 
4666 	/* TODO: Duplicate dc_stream once the stream object is flattened. */
4667 
4668 	return &state->base;
4669 }
4670 
4671 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4672 {
4673 	enum dc_irq_source irq_source;
4674 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4675 	struct amdgpu_device *adev = crtc->dev->dev_private;
4676 	int rc;
4677 
4678 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4679 
4680 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4681 
4682 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4683 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4684 	return rc;
4685 }
4686 
4687 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4688 {
4689 	enum dc_irq_source irq_source;
4690 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4691 	struct amdgpu_device *adev = crtc->dev->dev_private;
4692 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4693 	int rc = 0;
4694 
4695 	if (enable) {
4696 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4697 		if (amdgpu_dm_vrr_active(acrtc_state))
4698 			rc = dm_set_vupdate_irq(crtc, true);
4699 	} else {
4700 		/* vblank irq off -> vupdate irq off */
4701 		rc = dm_set_vupdate_irq(crtc, false);
4702 	}
4703 
4704 	if (rc)
4705 		return rc;
4706 
4707 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4708 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4709 }
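
/*
 * VUPDATE is only needed while VRR is active, so it is enabled together
 * with vblank only in VRR mode and unconditionally disabled with
 * vblank; the CRTC's own VBLANK interrupt is toggled last, so a
 * vupdate failure leaves vblank untouched.
 */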
4710 
4711 static int dm_enable_vblank(struct drm_crtc *crtc)
4712 {
4713 	return dm_set_vblank(crtc, true);
4714 }
4715 
4716 static void dm_disable_vblank(struct drm_crtc *crtc)
4717 {
4718 	dm_set_vblank(crtc, false);
4719 }
4720 
4721 /* Only the options currently available to the driver are implemented. */
4722 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4723 	.reset = dm_crtc_reset_state,
4724 	.destroy = amdgpu_dm_crtc_destroy,
4725 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4726 	.set_config = drm_atomic_helper_set_config,
4727 	.page_flip = drm_atomic_helper_page_flip,
4728 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4729 	.atomic_destroy_state = dm_crtc_destroy_state,
4730 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4731 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4732 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4733 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4734 	.enable_vblank = dm_enable_vblank,
4735 	.disable_vblank = dm_disable_vblank,
4736 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4737 };
4738 
4739 static enum drm_connector_status
4740 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4741 {
4742 	bool connected;
4743 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4744 
4745 	/*
4746 	 * Notes:
4747 	 * 1. This interface is NOT called in context of HPD irq.
4748 	 * 2. This interface *is called* in context of a user-mode ioctl, which
4749 	 * makes it a bad place for *any* MST-related activity.
4750 	 */
4751 
4752 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4753 	    !aconnector->fake_enable)
4754 		connected = (aconnector->dc_sink != NULL);
4755 	else
4756 		connected = (aconnector->base.force == DRM_FORCE_ON);
4757 
4758 	return (connected ? connector_status_connected :
4759 			connector_status_disconnected);
4760 }
4761 
4762 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4763 					    struct drm_connector_state *connector_state,
4764 					    struct drm_property *property,
4765 					    uint64_t val)
4766 {
4767 	struct drm_device *dev = connector->dev;
4768 	struct amdgpu_device *adev = dev->dev_private;
4769 	struct dm_connector_state *dm_old_state =
4770 		to_dm_connector_state(connector->state);
4771 	struct dm_connector_state *dm_new_state =
4772 		to_dm_connector_state(connector_state);
4773 
4774 	int ret = -EINVAL;
4775 
4776 	if (property == dev->mode_config.scaling_mode_property) {
4777 		enum amdgpu_rmx_type rmx_type;
4778 
4779 		switch (val) {
4780 		case DRM_MODE_SCALE_CENTER:
4781 			rmx_type = RMX_CENTER;
4782 			break;
4783 		case DRM_MODE_SCALE_ASPECT:
4784 			rmx_type = RMX_ASPECT;
4785 			break;
4786 		case DRM_MODE_SCALE_FULLSCREEN:
4787 			rmx_type = RMX_FULL;
4788 			break;
4789 		case DRM_MODE_SCALE_NONE:
4790 		default:
4791 			rmx_type = RMX_OFF;
4792 			break;
4793 		}
4794 
4795 		if (dm_old_state->scaling == rmx_type)
4796 			return 0;
4797 
4798 		dm_new_state->scaling = rmx_type;
4799 		ret = 0;
4800 	} else if (property == adev->mode_info.underscan_hborder_property) {
4801 		dm_new_state->underscan_hborder = val;
4802 		ret = 0;
4803 	} else if (property == adev->mode_info.underscan_vborder_property) {
4804 		dm_new_state->underscan_vborder = val;
4805 		ret = 0;
4806 	} else if (property == adev->mode_info.underscan_property) {
4807 		dm_new_state->underscan_enable = val;
4808 		ret = 0;
4809 	} else if (property == adev->mode_info.abm_level_property) {
4810 		dm_new_state->abm_level = val;
4811 		ret = 0;
4812 	}
4813 
4814 	return ret;
4815 }
4816 
4817 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4818 					    const struct drm_connector_state *state,
4819 					    struct drm_property *property,
4820 					    uint64_t *val)
4821 {
4822 	struct drm_device *dev = connector->dev;
4823 	struct amdgpu_device *adev = dev->dev_private;
4824 	struct dm_connector_state *dm_state =
4825 		to_dm_connector_state(state);
4826 	int ret = -EINVAL;
4827 
4828 	if (property == dev->mode_config.scaling_mode_property) {
4829 		switch (dm_state->scaling) {
4830 		case RMX_CENTER:
4831 			*val = DRM_MODE_SCALE_CENTER;
4832 			break;
4833 		case RMX_ASPECT:
4834 			*val = DRM_MODE_SCALE_ASPECT;
4835 			break;
4836 		case RMX_FULL:
4837 			*val = DRM_MODE_SCALE_FULLSCREEN;
4838 			break;
4839 		case RMX_OFF:
4840 		default:
4841 			*val = DRM_MODE_SCALE_NONE;
4842 			break;
4843 		}
4844 		ret = 0;
4845 	} else if (property == adev->mode_info.underscan_hborder_property) {
4846 		*val = dm_state->underscan_hborder;
4847 		ret = 0;
4848 	} else if (property == adev->mode_info.underscan_vborder_property) {
4849 		*val = dm_state->underscan_vborder;
4850 		ret = 0;
4851 	} else if (property == adev->mode_info.underscan_property) {
4852 		*val = dm_state->underscan_enable;
4853 		ret = 0;
4854 	} else if (property == adev->mode_info.abm_level_property) {
4855 		*val = dm_state->abm_level;
4856 		ret = 0;
4857 	}
4858 
4859 	return ret;
4860 }
4861 
4862 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4863 {
4864 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4865 
4866 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4867 }
4868 
4869 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4870 {
4871 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4872 	const struct dc_link *link = aconnector->dc_link;
4873 	struct amdgpu_device *adev = connector->dev->dev_private;
4874 	struct amdgpu_display_manager *dm = &adev->dm;
4875 
4876 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4877 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4878 
4879 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4880 	    link->type != dc_connection_none &&
4881 	    dm->backlight_dev) {
4882 		backlight_device_unregister(dm->backlight_dev);
4883 		dm->backlight_dev = NULL;
4884 	}
4885 #endif
4886 
4887 	if (aconnector->dc_em_sink)
4888 		dc_sink_release(aconnector->dc_em_sink);
4889 	aconnector->dc_em_sink = NULL;
4890 	if (aconnector->dc_sink)
4891 		dc_sink_release(aconnector->dc_sink);
4892 	aconnector->dc_sink = NULL;
4893 
4894 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4895 	drm_connector_unregister(connector);
4896 	drm_connector_cleanup(connector);
4897 	if (aconnector->i2c) {
4898 		i2c_del_adapter(&aconnector->i2c->base);
4899 		kfree(aconnector->i2c);
4900 	}
4901 	kfree(aconnector->dm_dp_aux.aux.name);
4902 
4903 	kfree(connector);
4904 }
4905 
4906 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4907 {
4908 	struct dm_connector_state *state =
4909 		to_dm_connector_state(connector->state);
4910 
4911 	if (connector->state)
4912 		__drm_atomic_helper_connector_destroy_state(connector->state);
4913 
4914 	kfree(state);
4915 
4916 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4917 
4918 	if (state) {
4919 		state->scaling = RMX_OFF;
4920 		state->underscan_enable = false;
4921 		state->underscan_hborder = 0;
4922 		state->underscan_vborder = 0;
4923 		state->base.max_requested_bpc = 8;
4924 		state->vcpi_slots = 0;
4925 		state->pbn = 0;
4926 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4927 			state->abm_level = amdgpu_dm_abm_level;
4928 
4929 		__drm_atomic_helper_connector_reset(connector, &state->base);
4930 	}
4931 }
4932 
4933 struct drm_connector_state *
4934 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4935 {
4936 	struct dm_connector_state *state =
4937 		to_dm_connector_state(connector->state);
4938 
4939 	struct dm_connector_state *new_state =
4940 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4941 
4942 	if (!new_state)
4943 		return NULL;
4944 
4945 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4946 
4947 	new_state->freesync_capable = state->freesync_capable;
4948 	new_state->abm_level = state->abm_level;
4949 	new_state->scaling = state->scaling;
4950 	new_state->underscan_enable = state->underscan_enable;
4951 	new_state->underscan_hborder = state->underscan_hborder;
4952 	new_state->underscan_vborder = state->underscan_vborder;
4953 	new_state->vcpi_slots = state->vcpi_slots;
4954 	new_state->pbn = state->pbn;
4955 	return &new_state->base;
4956 }
4957 
4958 static int
4959 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4960 {
4961 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4962 		to_amdgpu_dm_connector(connector);
4963 	int r;
4964 
4965 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4966 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4967 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4968 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4969 		if (r)
4970 			return r;
4971 	}
4972 
4973 #if defined(CONFIG_DEBUG_FS)
4974 	connector_debugfs_init(amdgpu_dm_connector);
4975 #endif
4976 
4977 	return 0;
4978 }
4979 
4980 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4981 	.reset = amdgpu_dm_connector_funcs_reset,
4982 	.detect = amdgpu_dm_connector_detect,
4983 	.fill_modes = drm_helper_probe_single_connector_modes,
4984 	.destroy = amdgpu_dm_connector_destroy,
4985 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4986 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4987 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4988 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4989 	.late_register = amdgpu_dm_connector_late_register,
4990 	.early_unregister = amdgpu_dm_connector_unregister
4991 };
4992 
4993 static int get_modes(struct drm_connector *connector)
4994 {
4995 	return amdgpu_dm_connector_get_modes(connector);
4996 }
4997 
4998 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4999 {
5000 	struct dc_sink_init_data init_params = {
5001 			.link = aconnector->dc_link,
5002 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5003 	};
5004 	struct edid *edid;
5005 
5006 	if (!aconnector->base.edid_blob_ptr) {
5007 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5008 				aconnector->base.name);
5009 
5010 		aconnector->base.force = DRM_FORCE_OFF;
5011 		aconnector->base.override_edid = false;
5012 		return;
5013 	}
5014 
5015 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5016 
5017 	aconnector->edid = edid;
5018 
5019 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5020 		aconnector->dc_link,
5021 		(uint8_t *)edid,
5022 		(edid->extensions + 1) * EDID_LENGTH,
5023 		&init_params);
5024 
5025 	if (aconnector->base.force == DRM_FORCE_ON) {
5026 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5027 		aconnector->dc_link->local_sink :
5028 		aconnector->dc_em_sink;
5029 		dc_sink_retain(aconnector->dc_sink);
5030 	}
5031 }
5032 
5033 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5034 {
5035 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5036 
5037 	/*
5038 	 * In case of a headless boot with force-on for a DP managed connector,
5039 	 * those settings have to be != 0 to get an initial modeset.
5040 	 */
5041 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5042 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5043 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5044 	}
5045 
5046 
5047 	aconnector->base.override_edid = true;
5048 	create_eml_sink(aconnector);
5049 }
5050 
5051 static struct dc_stream_state *
5052 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5053 				const struct drm_display_mode *drm_mode,
5054 				const struct dm_connector_state *dm_state,
5055 				const struct dc_stream_state *old_stream)
5056 {
5057 	struct drm_connector *connector = &aconnector->base;
5058 	struct amdgpu_device *adev = connector->dev->dev_private;
5059 	struct dc_stream_state *stream;
5060 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5061 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5062 	enum dc_status dc_result = DC_OK;
5063 
5064 	do {
5065 		stream = create_stream_for_sink(aconnector, drm_mode,
5066 						dm_state, old_stream,
5067 						requested_bpc);
5068 		if (stream == NULL) {
5069 			DRM_ERROR("Failed to create stream for sink!\n");
5070 			break;
5071 		}
5072 
5073 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5074 
5075 		if (dc_result != DC_OK) {
5076 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5077 				      drm_mode->hdisplay,
5078 				      drm_mode->vdisplay,
5079 				      drm_mode->clock,
5080 				      dc_result,
5081 				      dc_status_to_str(dc_result));
5082 
5083 			dc_stream_release(stream);
5084 			stream = NULL;
5085 			requested_bpc -= 2; /* lower bpc to retry validation */
5086 		}
5087 
5088 	} while (stream == NULL && requested_bpc >= 6);
5089 
5090 	return stream;
5091 }
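
/*
 * Validation retries at progressively lower colour depths: a connector
 * state requesting, say, max 12 bpc tries 12 -> 10 -> 8 -> 6 and stops
 * at the first depth DC accepts; the loop gives up once requested_bpc
 * would drop below 6.
 */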
5092 
5093 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5094 				   struct drm_display_mode *mode)
5095 {
5096 	int result = MODE_ERROR;
5097 	struct dc_sink *dc_sink;
5098 	/* TODO: Unhardcode stream count */
5099 	struct dc_stream_state *stream;
5100 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5101 
5102 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5103 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5104 		return result;
5105 
5106 	/*
5107 	 * Only run this the first time mode_valid is called to initialize
5108 	 * EDID mgmt
5109 	 */
5110 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5111 		!aconnector->dc_em_sink)
5112 		handle_edid_mgmt(aconnector);
5113 
5114 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5115 
5116 	if (dc_sink == NULL) {
5117 		DRM_ERROR("dc_sink is NULL!\n");
5118 		goto fail;
5119 	}
5120 
5121 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5122 	if (stream) {
5123 		dc_stream_release(stream);
5124 		result = MODE_OK;
5125 	}
5126 
5127 fail:
5128 	/* TODO: error handling */
5129 	return result;
5130 }
5131 
5132 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5133 				struct dc_info_packet *out)
5134 {
5135 	struct hdmi_drm_infoframe frame;
5136 	unsigned char buf[30]; /* 26 + 4 */
5137 	ssize_t len;
5138 	int ret, i;
5139 
5140 	memset(out, 0, sizeof(*out));
5141 
5142 	if (!state->hdr_output_metadata)
5143 		return 0;
5144 
5145 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5146 	if (ret)
5147 		return ret;
5148 
5149 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5150 	if (len < 0)
5151 		return (int)len;
5152 
5153 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5154 	if (len != 30)
5155 		return -EINVAL;
5156 
5157 	/* Prepare the infopacket for DC. */
5158 	switch (state->connector->connector_type) {
5159 	case DRM_MODE_CONNECTOR_HDMIA:
5160 		out->hb0 = 0x87; /* type */
5161 		out->hb1 = 0x01; /* version */
5162 		out->hb2 = 0x1A; /* length */
5163 		out->sb[0] = buf[3]; /* checksum */
5164 		i = 1;
5165 		break;
5166 
5167 	case DRM_MODE_CONNECTOR_DisplayPort:
5168 	case DRM_MODE_CONNECTOR_eDP:
5169 		out->hb0 = 0x00; /* sdp id, zero */
5170 		out->hb1 = 0x87; /* type */
5171 		out->hb2 = 0x1D; /* payload len - 1 */
5172 		out->hb3 = (0x13 << 2); /* sdp version */
5173 		out->sb[0] = 0x01; /* version */
5174 		out->sb[1] = 0x1A; /* length */
5175 		i = 2;
5176 		break;
5177 
5178 	default:
5179 		return -EINVAL;
5180 	}
5181 
5182 	memcpy(&out->sb[i], &buf[4], 26);
5183 	out->valid = true;
5184 
5185 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5186 		       sizeof(out->sb), false);
5187 
5188 	return 0;
5189 }
5190 
5191 static bool
5192 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5193 			  const struct drm_connector_state *new_state)
5194 {
5195 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5196 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5197 
5198 	if (old_blob != new_blob) {
5199 		if (old_blob && new_blob &&
5200 		    old_blob->length == new_blob->length)
5201 			return memcmp(old_blob->data, new_blob->data,
5202 				      old_blob->length);
5203 
5204 		return true;
5205 	}
5206 
5207 	return false;
5208 }
5209 
5210 static int
5211 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5212 				 struct drm_atomic_state *state)
5213 {
5214 	struct drm_connector_state *new_con_state =
5215 		drm_atomic_get_new_connector_state(state, conn);
5216 	struct drm_connector_state *old_con_state =
5217 		drm_atomic_get_old_connector_state(state, conn);
5218 	struct drm_crtc *crtc = new_con_state->crtc;
5219 	struct drm_crtc_state *new_crtc_state;
5220 	int ret;
5221 
5222 	if (!crtc)
5223 		return 0;
5224 
5225 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5226 		struct dc_info_packet hdr_infopacket;
5227 
5228 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5229 		if (ret)
5230 			return ret;
5231 
5232 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5233 		if (IS_ERR(new_crtc_state))
5234 			return PTR_ERR(new_crtc_state);
5235 
5236 		/*
5237 		 * DC considers the stream backends changed if the
5238 		 * static metadata changes. Forcing the modeset also
5239 		 * gives a simple way for userspace to switch from
5240 		 * 8bpc to 10bpc when setting the metadata to enter
5241 		 * or exit HDR.
5242 		 *
5243 		 * Changing the static metadata after it's been
5244 		 * set is permissible, however. So only force a
5245 		 * modeset if we're entering or exiting HDR.
5246 		 */
5247 		new_crtc_state->mode_changed =
5248 			!old_con_state->hdr_output_metadata ||
5249 			!new_con_state->hdr_output_metadata;
5250 	}
5251 
5252 	return 0;
5253 }
5254 
5255 static const struct drm_connector_helper_funcs
5256 amdgpu_dm_connector_helper_funcs = {
5257 	/*
5258 	 * If hotplugging a second, bigger display in FB console mode, the bigger
5259 	 * resolution modes will be filtered by drm_mode_validate_size(), and those
5260 	 * modes are missing after the user starts lightdm. So we need to renew the
5261 	 * modes list in the get_modes callback, not just return the modes count.
5262 	 */
5263 	.get_modes = get_modes,
5264 	.mode_valid = amdgpu_dm_connector_mode_valid,
5265 	.atomic_check = amdgpu_dm_connector_atomic_check,
5266 };
5267 
5268 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5269 {
5270 }
5271 
5272 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5273 {
5274 	struct drm_device *dev = new_crtc_state->crtc->dev;
5275 	struct drm_plane *plane;
5276 
5277 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5278 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5279 			return true;
5280 	}
5281 
5282 	return false;
5283 }
5284 
5285 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5286 {
5287 	struct drm_atomic_state *state = new_crtc_state->state;
5288 	struct drm_plane *plane;
5289 	int num_active = 0;
5290 
5291 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5292 		struct drm_plane_state *new_plane_state;
5293 
5294 		/* Cursor planes are "fake". */
5295 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5296 			continue;
5297 
5298 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5299 
5300 		if (!new_plane_state) {
5301 			/*
5302 			 * The plane is enabled on the CRTC and hasn't changed
5303 			 * state. This means that it previously passed
5304 			 * validation and is therefore enabled.
5305 			 */
5306 			num_active += 1;
5307 			continue;
5308 		}
5309 
5310 		/* We need a framebuffer to be considered enabled. */
5311 		num_active += (new_plane_state->fb != NULL);
5312 	}
5313 
5314 	return num_active;
5315 }
5316 
5317 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5318 					 struct drm_crtc_state *new_crtc_state)
5319 {
5320 	struct dm_crtc_state *dm_new_crtc_state =
5321 		to_dm_crtc_state(new_crtc_state);
5322 
5323 	dm_new_crtc_state->active_planes = 0;
5324 
5325 	if (!dm_new_crtc_state->stream)
5326 		return;
5327 
5328 	dm_new_crtc_state->active_planes =
5329 		count_crtc_active_planes(new_crtc_state);
5330 }
5331 
5332 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5333 				       struct drm_crtc_state *state)
5334 {
5335 	struct amdgpu_device *adev = crtc->dev->dev_private;
5336 	struct dc *dc = adev->dm.dc;
5337 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5338 	int ret = -EINVAL;
5339 
5340 	dm_update_crtc_active_planes(crtc, state);
5341 
5342 	if (unlikely(!dm_crtc_state->stream &&
5343 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5344 		WARN_ON(1);
5345 		return ret;
5346 	}
5347 
5348 	/* In some use cases, like reset, no stream is attached */
5349 	if (!dm_crtc_state->stream)
5350 		return 0;
5351 
5352 	/*
5353 	 * We want at least one hardware plane enabled to use
5354 	 * the stream with a cursor enabled.
5355 	 */
5356 	if (state->enable && state->active &&
5357 	    does_crtc_have_active_cursor(state) &&
5358 	    dm_crtc_state->active_planes == 0)
5359 		return -EINVAL;
5360 
5361 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5362 		return 0;
5363 
5364 	return ret;
5365 }
5366 
5367 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5368 				      const struct drm_display_mode *mode,
5369 				      struct drm_display_mode *adjusted_mode)
5370 {
5371 	return true;
5372 }
5373 
5374 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5375 	.disable = dm_crtc_helper_disable,
5376 	.atomic_check = dm_crtc_helper_atomic_check,
5377 	.mode_fixup = dm_crtc_helper_mode_fixup,
5378 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5379 };
5380 
5381 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5382 {
5383 
5384 }
5385 
5386 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5387 {
5388 	switch (display_color_depth) {
5389 	case COLOR_DEPTH_666:
5390 		return 6;
5391 	case COLOR_DEPTH_888:
5392 		return 8;
5393 	case COLOR_DEPTH_101010:
5394 		return 10;
5395 	case COLOR_DEPTH_121212:
5396 		return 12;
5397 	case COLOR_DEPTH_141414:
5398 		return 14;
5399 	case COLOR_DEPTH_161616:
5400 		return 16;
5401 	default:
5402 		break;
5403 	}
5404 	return 0;
5405 }
5406 
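/*
 * Encoder atomic check for DP MST connectors: derive the stream's bpp from
 * the negotiated color depth (three components per pixel), convert the pixel
 * clock and bpp into a PBN value, and atomically claim VCPI time slots on
 * the MST topology. A negative slot count fails the atomic check.
 */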
5407 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5408 					  struct drm_crtc_state *crtc_state,
5409 					  struct drm_connector_state *conn_state)
5410 {
5411 	struct drm_atomic_state *state = crtc_state->state;
5412 	struct drm_connector *connector = conn_state->connector;
5413 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5414 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5415 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5416 	struct drm_dp_mst_topology_mgr *mst_mgr;
5417 	struct drm_dp_mst_port *mst_port;
5418 	enum dc_color_depth color_depth;
5419 	int clock, bpp = 0;
5420 	bool is_y420 = false;
5421 
5422 	if (!aconnector->port || !aconnector->dc_sink)
5423 		return 0;
5424 
5425 	mst_port = aconnector->port;
5426 	mst_mgr = &aconnector->mst_port->mst_mgr;
5427 
5428 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5429 		return 0;
5430 
5431 	if (!state->duplicated) {
5432 		int max_bpc = conn_state->max_requested_bpc;
5433 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5434 				aconnector->force_yuv420_output;
5435 		color_depth = convert_color_depth_from_display_info(connector,
5436 								    is_y420,
5437 								    max_bpc);
5438 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5439 		clock = adjusted_mode->clock;
5440 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5441 	}
5442 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5443 									   mst_mgr,
5444 									   mst_port,
5445 									   dm_new_connector_state->pbn,
5446 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5447 	if (dm_new_connector_state->vcpi_slots < 0) {
5448 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5449 		return dm_new_connector_state->vcpi_slots;
5450 	}
5451 	return 0;
5452 }
5453 
5454 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5455 	.disable = dm_encoder_helper_disable,
5456 	.atomic_check = dm_encoder_helper_atomic_check
5457 };
5458 
5459 #if defined(CONFIG_DRM_AMD_DC_DCN)
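/*
 * For MST streams using DSC, the PBN computed during the encoder atomic
 * check (from the uncompressed bpp) is stale: recompute it from the DSC
 * target bits_per_pixel and the pixel clock, and enable DSC on the MST
 * port. Streams without DSC keep their PBN and get DSC disabled instead.
 */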
5460 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5461 					    struct dc_state *dc_state)
5462 {
5463 	struct dc_stream_state *stream = NULL;
5464 	struct drm_connector *connector;
5465 	struct drm_connector_state *new_con_state, *old_con_state;
5466 	struct amdgpu_dm_connector *aconnector;
5467 	struct dm_connector_state *dm_conn_state;
5468 	int i, j, clock, bpp;
5469 	int vcpi, pbn_div, pbn = 0;
5470 
5471 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5472 
5473 		aconnector = to_amdgpu_dm_connector(connector);
5474 
5475 		if (!aconnector->port)
5476 			continue;
5477 
5478 		if (!new_con_state || !new_con_state->crtc)
5479 			continue;
5480 
5481 		dm_conn_state = to_dm_connector_state(new_con_state);
5482 
5483 		for (j = 0; j < dc_state->stream_count; j++) {
5484 			stream = dc_state->streams[j];
5485 			if (!stream)
5486 				continue;
5487 
5488 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5489 				break;
5490 
5491 			stream = NULL;
5492 		}
5493 
5494 		if (!stream)
5495 			continue;
5496 
5497 		if (stream->timing.flags.DSC != 1) {
5498 			drm_dp_mst_atomic_enable_dsc(state,
5499 						     aconnector->port,
5500 						     dm_conn_state->pbn,
5501 						     0,
5502 						     false);
5503 			continue;
5504 		}
5505 
5506 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5507 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5508 		clock = stream->timing.pix_clk_100hz / 10;
5509 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5510 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5511 						    aconnector->port,
5512 						    pbn, pbn_div,
5513 						    true);
5514 		if (vcpi < 0)
5515 			return vcpi;
5516 
5517 		dm_conn_state->pbn = pbn;
5518 		dm_conn_state->vcpi_slots = vcpi;
5519 	}
5520 	return 0;
5521 }
5522 #endif
5523 
5524 static void dm_drm_plane_reset(struct drm_plane *plane)
5525 {
5526 	struct dm_plane_state *amdgpu_state = NULL;
5527 
5528 	if (plane->state)
5529 		plane->funcs->atomic_destroy_state(plane, plane->state);
5530 
5531 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5532 	WARN_ON(amdgpu_state == NULL);
5533 
5534 	if (amdgpu_state)
5535 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5536 }
5537 
5538 static struct drm_plane_state *
5539 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5540 {
5541 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5542 
5543 	old_dm_plane_state = to_dm_plane_state(plane->state);
5544 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5545 	if (!dm_plane_state)
5546 		return NULL;
5547 
5548 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5549 
5550 	if (old_dm_plane_state->dc_state) {
5551 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5552 		dc_plane_state_retain(dm_plane_state->dc_state);
5553 	}
5554 
5555 	return &dm_plane_state->base;
5556 }
5557 
5558 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5559 				struct drm_plane_state *state)
5560 {
5561 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5562 
5563 	if (dm_plane_state->dc_state)
5564 		dc_plane_state_release(dm_plane_state->dc_state);
5565 
5566 	drm_atomic_helper_plane_destroy_state(plane, state);
5567 }
5568 
5569 static const struct drm_plane_funcs dm_plane_funcs = {
5570 	.update_plane	= drm_atomic_helper_update_plane,
5571 	.disable_plane	= drm_atomic_helper_disable_plane,
5572 	.destroy	= drm_primary_helper_destroy,
5573 	.reset = dm_drm_plane_reset,
5574 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5575 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5576 };
5577 
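/*
 * prepare_fb: runs before the commit touches hardware. Reserves and pins
 * the framebuffer BO (VRAM for cursors, any supported domain otherwise),
 * binds it into GART so it has a GPU-visible address, and snapshots the
 * tiling flags and TMZ state into the pending dc_plane_state.
 */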
5578 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5579 				      struct drm_plane_state *new_state)
5580 {
5581 	struct amdgpu_framebuffer *afb;
5582 	struct drm_gem_object *obj;
5583 	struct amdgpu_device *adev;
5584 	struct amdgpu_bo *rbo;
5585 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5586 	struct list_head list;
5587 	struct ttm_validate_buffer tv;
5588 	struct ww_acquire_ctx ticket;
5589 	uint64_t tiling_flags;
5590 	uint32_t domain;
5591 	int r;
5592 	bool tmz_surface = false;
5593 	bool force_disable_dcc = false;
5594 
5595 	dm_plane_state_old = to_dm_plane_state(plane->state);
5596 	dm_plane_state_new = to_dm_plane_state(new_state);
5597 
5598 	if (!new_state->fb) {
5599 		DRM_DEBUG_DRIVER("No FB bound\n");
5600 		return 0;
5601 	}
5602 
5603 	afb = to_amdgpu_framebuffer(new_state->fb);
5604 	obj = new_state->fb->obj[0];
5605 	rbo = gem_to_amdgpu_bo(obj);
5606 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5607 	INIT_LIST_HEAD(&list);
5608 
5609 	tv.bo = &rbo->tbo;
5610 	tv.num_shared = 1;
5611 	list_add(&tv.head, &list);
5612 
5613 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5614 	if (r) {
5615 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5616 		return r;
5617 	}
5618 
5619 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5620 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5621 	else
5622 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5623 
5624 	r = amdgpu_bo_pin(rbo, domain);
5625 	if (unlikely(r != 0)) {
5626 		if (r != -ERESTARTSYS)
5627 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5628 		ttm_eu_backoff_reservation(&ticket, &list);
5629 		return r;
5630 	}
5631 
5632 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5633 	if (unlikely(r != 0)) {
5634 		amdgpu_bo_unpin(rbo);
5635 		ttm_eu_backoff_reservation(&ticket, &list);
5636 		DRM_ERROR("%p bind failed\n", rbo);
5637 		return r;
5638 	}
5639 
5640 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5641 
5642 	tmz_surface = amdgpu_bo_encrypted(rbo);
5643 
5644 	ttm_eu_backoff_reservation(&ticket, &list);
5645 
5646 	afb->address = amdgpu_bo_gpu_offset(rbo);
5647 
5648 	amdgpu_bo_ref(rbo);
5649 
5650 	if (dm_plane_state_new->dc_state &&
5651 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5652 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5653 
5654 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5655 		fill_plane_buffer_attributes(
5656 			adev, afb, plane_state->format, plane_state->rotation,
5657 			tiling_flags, &plane_state->tiling_info,
5658 			&plane_state->plane_size, &plane_state->dcc,
5659 			&plane_state->address, tmz_surface,
5660 			force_disable_dcc);
5661 	}
5662 
5663 	return 0;
5664 }
5665 
5666 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5667 				       struct drm_plane_state *old_state)
5668 {
5669 	struct amdgpu_bo *rbo;
5670 	int r;
5671 
5672 	if (!old_state->fb)
5673 		return;
5674 
5675 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5676 	r = amdgpu_bo_reserve(rbo, false);
5677 	if (unlikely(r)) {
5678 		DRM_ERROR("failed to reserve rbo before unpin\n");
5679 		return;
5680 	}
5681 
5682 	amdgpu_bo_unpin(rbo);
5683 	amdgpu_bo_unreserve(rbo);
5684 	amdgpu_bo_unref(&rbo);
5685 }
5686 
5687 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5688 				       struct drm_crtc_state *new_crtc_state)
5689 {
5690 	int max_downscale = 0;
5691 	int max_upscale = INT_MAX;
5692 
5693 	/* TODO: These should be checked against DC plane caps */
5694 	return drm_atomic_helper_check_plane_state(
5695 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5696 }
5697 
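/*
 * Plane atomic check: the generic DRM scaling limits are checked first via
 * dm_plane_helper_check_state(); the final word on scaling and plane
 * support comes from dc_validate_plane().
 */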
5698 static int dm_plane_atomic_check(struct drm_plane *plane,
5699 				 struct drm_plane_state *state)
5700 {
5701 	struct amdgpu_device *adev = plane->dev->dev_private;
5702 	struct dc *dc = adev->dm.dc;
5703 	struct dm_plane_state *dm_plane_state;
5704 	struct dc_scaling_info scaling_info;
5705 	struct drm_crtc_state *new_crtc_state;
5706 	int ret;
5707 
5708 	dm_plane_state = to_dm_plane_state(state);
5709 
5710 	if (!dm_plane_state->dc_state)
5711 		return 0;
5712 
5713 	new_crtc_state =
5714 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5715 	if (!new_crtc_state)
5716 		return -EINVAL;
5717 
5718 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5719 	if (ret)
5720 		return ret;
5721 
5722 	ret = fill_dc_scaling_info(state, &scaling_info);
5723 	if (ret)
5724 		return ret;
5725 
5726 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5727 		return 0;
5728 
5729 	return -EINVAL;
5730 }
5731 
5732 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5733 				       struct drm_plane_state *new_plane_state)
5734 {
5735 	/* Only support async updates on cursor planes. */
5736 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5737 		return -EINVAL;
5738 
5739 	return 0;
5740 }
5741 
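/*
 * Async (non-vblank-synchronized) update path, reachable only for cursor
 * planes per the check above: the new coordinates are folded into the
 * current plane state by hand and pushed to DC immediately through
 * handle_cursor_update().
 */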
5742 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5743 					 struct drm_plane_state *new_state)
5744 {
5745 	struct drm_plane_state *old_state =
5746 		drm_atomic_get_old_plane_state(new_state->state, plane);
5747 
5748 	swap(plane->state->fb, new_state->fb);
5749 
5750 	plane->state->src_x = new_state->src_x;
5751 	plane->state->src_y = new_state->src_y;
5752 	plane->state->src_w = new_state->src_w;
5753 	plane->state->src_h = new_state->src_h;
5754 	plane->state->crtc_x = new_state->crtc_x;
5755 	plane->state->crtc_y = new_state->crtc_y;
5756 	plane->state->crtc_w = new_state->crtc_w;
5757 	plane->state->crtc_h = new_state->crtc_h;
5758 
5759 	handle_cursor_update(plane, old_state);
5760 }
5761 
5762 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5763 	.prepare_fb = dm_plane_helper_prepare_fb,
5764 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5765 	.atomic_check = dm_plane_atomic_check,
5766 	.atomic_async_check = dm_plane_atomic_async_check,
5767 	.atomic_async_update = dm_plane_atomic_async_update
5768 };
5769 
5770 /*
5771  * TODO: these are currently initialized to RGB formats only.
5772  * For future use cases we should either initialize them dynamically based on
5773  * plane capabilities, or initialize this array to all formats, so the internal
5774  * drm check will succeed, and let DC implement the proper check.
5775  */
5776 static const uint32_t rgb_formats[] = {
5777 	DRM_FORMAT_XRGB8888,
5778 	DRM_FORMAT_ARGB8888,
5779 	DRM_FORMAT_RGBA8888,
5780 	DRM_FORMAT_XRGB2101010,
5781 	DRM_FORMAT_XBGR2101010,
5782 	DRM_FORMAT_ARGB2101010,
5783 	DRM_FORMAT_ABGR2101010,
5784 	DRM_FORMAT_XBGR8888,
5785 	DRM_FORMAT_ABGR8888,
5786 	DRM_FORMAT_RGB565,
5787 };
5788 
5789 static const uint32_t overlay_formats[] = {
5790 	DRM_FORMAT_XRGB8888,
5791 	DRM_FORMAT_ARGB8888,
5792 	DRM_FORMAT_RGBA8888,
5793 	DRM_FORMAT_XBGR8888,
5794 	DRM_FORMAT_ABGR8888,
5795 	DRM_FORMAT_RGB565
5796 };
5797 
5798 static const u32 cursor_formats[] = {
5799 	DRM_FORMAT_ARGB8888
5800 };
5801 
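/*
 * Builds a plane's DRM format list: the static per-type tables above are
 * copied (up to max_formats entries), and for primary planes NV12/P010/FP16
 * formats are appended when the DC plane caps report support for them.
 */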
5802 static int get_plane_formats(const struct drm_plane *plane,
5803 			     const struct dc_plane_cap *plane_cap,
5804 			     uint32_t *formats, int max_formats)
5805 {
5806 	int i, num_formats = 0;
5807 
5808 	/*
5809 	 * TODO: Query support for each group of formats directly from
5810 	 * DC plane caps. This will require adding more formats to the
5811 	 * caps list.
5812 	 */
5813 
5814 	switch (plane->type) {
5815 	case DRM_PLANE_TYPE_PRIMARY:
5816 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5817 			if (num_formats >= max_formats)
5818 				break;
5819 
5820 			formats[num_formats++] = rgb_formats[i];
5821 		}
5822 
5823 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5824 			formats[num_formats++] = DRM_FORMAT_NV12;
5825 		if (plane_cap && plane_cap->pixel_format_support.p010)
5826 			formats[num_formats++] = DRM_FORMAT_P010;
5827 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5828 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5829 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5830 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5831 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5832 		}
5833 		break;
5834 
5835 	case DRM_PLANE_TYPE_OVERLAY:
5836 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5837 			if (num_formats >= max_formats)
5838 				break;
5839 
5840 			formats[num_formats++] = overlay_formats[i];
5841 		}
5842 		break;
5843 
5844 	case DRM_PLANE_TYPE_CURSOR:
5845 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5846 			if (num_formats >= max_formats)
5847 				break;
5848 
5849 			formats[num_formats++] = cursor_formats[i];
5850 		}
5851 		break;
5852 	}
5853 
5854 	return num_formats;
5855 }
5856 
5857 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5858 				struct drm_plane *plane,
5859 				unsigned long possible_crtcs,
5860 				const struct dc_plane_cap *plane_cap)
5861 {
5862 	uint32_t formats[32];
5863 	int num_formats;
5864 	int res = -EPERM;
5865 	unsigned int supported_rotations;
5866 
5867 	num_formats = get_plane_formats(plane, plane_cap, formats,
5868 					ARRAY_SIZE(formats));
5869 
5870 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5871 				       &dm_plane_funcs, formats, num_formats,
5872 				       NULL, plane->type, NULL);
5873 	if (res)
5874 		return res;
5875 
5876 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5877 	    plane_cap && plane_cap->per_pixel_alpha) {
5878 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5879 					  BIT(DRM_MODE_BLEND_PREMULTI);
5880 
5881 		drm_plane_create_alpha_property(plane);
5882 		drm_plane_create_blend_mode_property(plane, blend_caps);
5883 	}
5884 
5885 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5886 	    plane_cap &&
5887 	    (plane_cap->pixel_format_support.nv12 ||
5888 	     plane_cap->pixel_format_support.p010)) {
5889 		/* This only affects YUV formats. */
5890 		drm_plane_create_color_properties(
5891 			plane,
5892 			BIT(DRM_COLOR_YCBCR_BT601) |
5893 			BIT(DRM_COLOR_YCBCR_BT709) |
5894 			BIT(DRM_COLOR_YCBCR_BT2020),
5895 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5896 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5897 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5898 	}
5899 
5900 	supported_rotations =
5901 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
5902 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
5903 
5904 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
5905 					   supported_rotations);
5906 
5907 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5908 
5909 	/* Create (reset) the plane state */
5910 	if (plane->funcs->reset)
5911 		plane->funcs->reset(plane);
5912 
5913 	return 0;
5914 }
5915 
5916 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5917 			       struct drm_plane *plane,
5918 			       uint32_t crtc_index)
5919 {
5920 	struct amdgpu_crtc *acrtc = NULL;
5921 	struct drm_plane *cursor_plane;
5922 
5923 	int res = -ENOMEM;
5924 
5925 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5926 	if (!cursor_plane)
5927 		goto fail;
5928 
5929 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5930 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5931 
5932 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5933 	if (!acrtc)
5934 		goto fail;
5935 
5936 	res = drm_crtc_init_with_planes(
5937 			dm->ddev,
5938 			&acrtc->base,
5939 			plane,
5940 			cursor_plane,
5941 			&amdgpu_dm_crtc_funcs, NULL);
5942 
5943 	if (res)
5944 		goto fail;
5945 
5946 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5947 
5948 	/* Create (reset) the CRTC state */
5949 	if (acrtc->base.funcs->reset)
5950 		acrtc->base.funcs->reset(&acrtc->base);
5951 
5952 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5953 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5954 
5955 	acrtc->crtc_id = crtc_index;
5956 	acrtc->base.enabled = false;
5957 	acrtc->otg_inst = -1;
5958 
5959 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5960 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5961 				   true, MAX_COLOR_LUT_ENTRIES);
5962 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5963 
5964 	return 0;
5965 
5966 fail:
5967 	kfree(acrtc);
5968 	kfree(cursor_plane);
5969 	return res;
5970 }
5971 
5972 
5973 static int to_drm_connector_type(enum signal_type st)
5974 {
5975 	switch (st) {
5976 	case SIGNAL_TYPE_HDMI_TYPE_A:
5977 		return DRM_MODE_CONNECTOR_HDMIA;
5978 	case SIGNAL_TYPE_EDP:
5979 		return DRM_MODE_CONNECTOR_eDP;
5980 	case SIGNAL_TYPE_LVDS:
5981 		return DRM_MODE_CONNECTOR_LVDS;
5982 	case SIGNAL_TYPE_RGB:
5983 		return DRM_MODE_CONNECTOR_VGA;
5984 	case SIGNAL_TYPE_DISPLAY_PORT:
5985 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5986 		return DRM_MODE_CONNECTOR_DisplayPort;
5987 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5988 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5989 		return DRM_MODE_CONNECTOR_DVID;
5990 	case SIGNAL_TYPE_VIRTUAL:
5991 		return DRM_MODE_CONNECTOR_VIRTUAL;
5992 
5993 	default:
5994 		return DRM_MODE_CONNECTOR_Unknown;
5995 	}
5996 }
5997 
5998 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5999 {
6000 	struct drm_encoder *encoder;
6001 
6002 	/* There is only one encoder per connector */
6003 	drm_connector_for_each_possible_encoder(connector, encoder)
6004 		return encoder;
6005 
6006 	return NULL;
6007 }
6008 
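/*
 * Records the connector's native mode on the encoder. probed_modes has
 * already been sorted (see amdgpu_dm_connector_ddc_get_modes() below), so
 * only the first entry is examined; it becomes the native mode when it
 * carries DRM_MODE_TYPE_PREFERRED.
 */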
6009 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6010 {
6011 	struct drm_encoder *encoder;
6012 	struct amdgpu_encoder *amdgpu_encoder;
6013 
6014 	encoder = amdgpu_dm_connector_to_encoder(connector);
6015 
6016 	if (encoder == NULL)
6017 		return;
6018 
6019 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6020 
6021 	amdgpu_encoder->native_mode.clock = 0;
6022 
6023 	if (!list_empty(&connector->probed_modes)) {
6024 		struct drm_display_mode *preferred_mode = NULL;
6025 
6026 		list_for_each_entry(preferred_mode,
6027 				    &connector->probed_modes,
6028 				    head) {
6029 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6030 				amdgpu_encoder->native_mode = *preferred_mode;
6031 
6032 			break;
6033 		}
6034 
6035 	}
6036 }
6037 
6038 static struct drm_display_mode *
6039 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6040 			     char *name,
6041 			     int hdisplay, int vdisplay)
6042 {
6043 	struct drm_device *dev = encoder->dev;
6044 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6045 	struct drm_display_mode *mode = NULL;
6046 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6047 
6048 	mode = drm_mode_duplicate(dev, native_mode);
6049 
6050 	if (mode == NULL)
6051 		return NULL;
6052 
6053 	mode->hdisplay = hdisplay;
6054 	mode->vdisplay = vdisplay;
6055 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6056 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6057 
6058 	return mode;
6059 
6060 }
6061 
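/*
 * Adds a set of common lower-than-native resolutions, useful for scaled
 * output. A common mode is skipped if it exceeds the native mode in either
 * dimension, matches it exactly, or already exists in the probed list.
 */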
6062 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6063 						 struct drm_connector *connector)
6064 {
6065 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6066 	struct drm_display_mode *mode = NULL;
6067 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6068 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6069 				to_amdgpu_dm_connector(connector);
6070 	int i;
6071 	int n;
6072 	struct mode_size {
6073 		char name[DRM_DISPLAY_MODE_LEN];
6074 		int w;
6075 		int h;
6076 	} common_modes[] = {
6077 		{  "640x480",  640,  480},
6078 		{  "800x600",  800,  600},
6079 		{ "1024x768", 1024,  768},
6080 		{ "1280x720", 1280,  720},
6081 		{ "1280x800", 1280,  800},
6082 		{"1280x1024", 1280, 1024},
6083 		{ "1440x900", 1440,  900},
6084 		{"1680x1050", 1680, 1050},
6085 		{"1600x1200", 1600, 1200},
6086 		{"1920x1080", 1920, 1080},
6087 		{"1920x1200", 1920, 1200}
6088 	};
6089 
6090 	n = ARRAY_SIZE(common_modes);
6091 
6092 	for (i = 0; i < n; i++) {
6093 		struct drm_display_mode *curmode = NULL;
6094 		bool mode_existed = false;
6095 
6096 		if (common_modes[i].w > native_mode->hdisplay ||
6097 		    common_modes[i].h > native_mode->vdisplay ||
6098 		   (common_modes[i].w == native_mode->hdisplay &&
6099 		    common_modes[i].h == native_mode->vdisplay))
6100 			continue;
6101 
6102 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6103 			if (common_modes[i].w == curmode->hdisplay &&
6104 			    common_modes[i].h == curmode->vdisplay) {
6105 				mode_existed = true;
6106 				break;
6107 			}
6108 		}
6109 
6110 		if (mode_existed)
6111 			continue;
6112 
6113 		mode = amdgpu_dm_create_common_mode(encoder,
6114 				common_modes[i].name, common_modes[i].w,
6115 				common_modes[i].h);
6116 		drm_mode_probed_add(connector, mode);
6117 		amdgpu_dm_connector->num_modes++;
6118 	}
6119 }
6120 
6121 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6122 					      struct edid *edid)
6123 {
6124 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6125 			to_amdgpu_dm_connector(connector);
6126 
6127 	if (edid) {
6128 		/* empty probed_modes */
6129 		INIT_LIST_HEAD(&connector->probed_modes);
6130 		amdgpu_dm_connector->num_modes =
6131 				drm_add_edid_modes(connector, edid);
6132 
6133 		/* Sort the probed modes before calling
6134 		 * amdgpu_dm_get_native_mode(), since an EDID can have
6135 		 * more than one preferred mode. Modes that appear
6136 		 * later in the probed mode list could be of a higher,
6137 		 * preferred resolution. For example, a 3840x2160
6138 		 * preferred timing in the base EDID and a 4096x2160
6139 		 * preferred resolution in a DID extension block later.
6140 		 */
6141 		drm_mode_sort(&connector->probed_modes);
6142 		amdgpu_dm_get_native_mode(connector);
6143 	} else {
6144 		amdgpu_dm_connector->num_modes = 0;
6145 	}
6146 }
6147 
6148 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6149 {
6150 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6151 			to_amdgpu_dm_connector(connector);
6152 	struct drm_encoder *encoder;
6153 	struct edid *edid = amdgpu_dm_connector->edid;
6154 
6155 	encoder = amdgpu_dm_connector_to_encoder(connector);
6156 
6157 	if (!edid || !drm_edid_is_valid(edid)) {
6158 		amdgpu_dm_connector->num_modes =
6159 				drm_add_modes_noedid(connector, 640, 480);
6160 	} else {
6161 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6162 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6163 	}
6164 	amdgpu_dm_fbc_init(connector);
6165 
6166 	return amdgpu_dm_connector->num_modes;
6167 }
6168 
6169 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6170 				     struct amdgpu_dm_connector *aconnector,
6171 				     int connector_type,
6172 				     struct dc_link *link,
6173 				     int link_index)
6174 {
6175 	struct amdgpu_device *adev = dm->ddev->dev_private;
6176 
6177 	/*
6178 	 * Some of the properties below require access to state, like bpc.
6179 	 * Allocate some default initial connector state with our reset helper.
6180 	 */
6181 	if (aconnector->base.funcs->reset)
6182 		aconnector->base.funcs->reset(&aconnector->base);
6183 
6184 	aconnector->connector_id = link_index;
6185 	aconnector->dc_link = link;
6186 	aconnector->base.interlace_allowed = false;
6187 	aconnector->base.doublescan_allowed = false;
6188 	aconnector->base.stereo_allowed = false;
6189 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6190 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6191 	aconnector->audio_inst = -1;
6192 	mutex_init(&aconnector->hpd_lock);
6193 
6194 	/*
6195 	 * Configure HPD hot plug support: connector->polled defaults to 0,
6196 	 * which means HPD hot plug is not supported.
6197 	 */
6198 	switch (connector_type) {
6199 	case DRM_MODE_CONNECTOR_HDMIA:
6200 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6201 		aconnector->base.ycbcr_420_allowed =
6202 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6203 		break;
6204 	case DRM_MODE_CONNECTOR_DisplayPort:
6205 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6206 		aconnector->base.ycbcr_420_allowed =
6207 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6208 		break;
6209 	case DRM_MODE_CONNECTOR_DVID:
6210 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6211 		break;
6212 	default:
6213 		break;
6214 	}
6215 
6216 	drm_object_attach_property(&aconnector->base.base,
6217 				dm->ddev->mode_config.scaling_mode_property,
6218 				DRM_MODE_SCALE_NONE);
6219 
6220 	drm_object_attach_property(&aconnector->base.base,
6221 				adev->mode_info.underscan_property,
6222 				UNDERSCAN_OFF);
6223 	drm_object_attach_property(&aconnector->base.base,
6224 				adev->mode_info.underscan_hborder_property,
6225 				0);
6226 	drm_object_attach_property(&aconnector->base.base,
6227 				adev->mode_info.underscan_vborder_property,
6228 				0);
6229 
6230 	if (!aconnector->mst_port)
6231 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6232 
6233 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6234 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6235 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6236 
6237 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6238 	    dc_is_dmcu_initialized(adev->dm.dc)) {
6239 		drm_object_attach_property(&aconnector->base.base,
6240 				adev->mode_info.abm_level_property, 0);
6241 	}
6242 
6243 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6244 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6245 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6246 		drm_object_attach_property(
6247 			&aconnector->base.base,
6248 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6249 
6250 		if (!aconnector->mst_port)
6251 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6252 
6253 #ifdef CONFIG_DRM_AMD_DC_HDCP
6254 		if (adev->dm.hdcp_workqueue)
6255 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6256 #endif
6257 	}
6258 }
6259 
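/*
 * i2c_algorithm .master_xfer hook: translates the i2c_msg array 1:1 into DC
 * i2c payloads and submits them as a single command on the link's DDC
 * channel. Returns the number of messages on success, -EIO otherwise.
 */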
6260 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6261 			      struct i2c_msg *msgs, int num)
6262 {
6263 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6264 	struct ddc_service *ddc_service = i2c->ddc_service;
6265 	struct i2c_command cmd;
6266 	int i;
6267 	int result = -EIO;
6268 
6269 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6270 
6271 	if (!cmd.payloads)
6272 		return result;
6273 
6274 	cmd.number_of_payloads = num;
6275 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6276 	cmd.speed = 100;
6277 
6278 	for (i = 0; i < num; i++) {
6279 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6280 		cmd.payloads[i].address = msgs[i].addr;
6281 		cmd.payloads[i].length = msgs[i].len;
6282 		cmd.payloads[i].data = msgs[i].buf;
6283 	}
6284 
6285 	if (dc_submit_i2c(
6286 			ddc_service->ctx->dc,
6287 			ddc_service->ddc_pin->hw_info.ddc_channel,
6288 			&cmd))
6289 		result = num;
6290 
6291 	kfree(cmd.payloads);
6292 	return result;
6293 }
6294 
6295 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6296 {
6297 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6298 }
6299 
6300 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6301 	.master_xfer = amdgpu_dm_i2c_xfer,
6302 	.functionality = amdgpu_dm_i2c_func,
6303 };
6304 
6305 static struct amdgpu_i2c_adapter *
6306 create_i2c(struct ddc_service *ddc_service,
6307 	   int link_index,
6308 	   int *res)
6309 {
6310 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6311 	struct amdgpu_i2c_adapter *i2c;
6312 
6313 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6314 	if (!i2c)
6315 		return NULL;
6316 	i2c->base.owner = THIS_MODULE;
6317 	i2c->base.class = I2C_CLASS_DDC;
6318 	i2c->base.dev.parent = &adev->pdev->dev;
6319 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6320 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6321 	i2c_set_adapdata(&i2c->base, i2c);
6322 	i2c->ddc_service = ddc_service;
6323 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6324 
6325 	return i2c;
6326 }
6327 
6328 
6329 /*
6330  * Note: this function assumes that dc_link_detect() was called for the
6331  * dc_link which will be represented by this aconnector.
6332  */
6333 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6334 				    struct amdgpu_dm_connector *aconnector,
6335 				    uint32_t link_index,
6336 				    struct amdgpu_encoder *aencoder)
6337 {
6338 	int res = 0;
6339 	int connector_type;
6340 	struct dc *dc = dm->dc;
6341 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6342 	struct amdgpu_i2c_adapter *i2c;
6343 
6344 	link->priv = aconnector;
6345 
6346 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6347 
6348 	i2c = create_i2c(link->ddc, link->link_index, &res);
6349 	if (!i2c) {
6350 		DRM_ERROR("Failed to create i2c adapter data\n");
6351 		return -ENOMEM;
6352 	}
6353 
6354 	aconnector->i2c = i2c;
6355 	res = i2c_add_adapter(&i2c->base);
6356 
6357 	if (res) {
6358 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6359 		goto out_free;
6360 	}
6361 
6362 	connector_type = to_drm_connector_type(link->connector_signal);
6363 
6364 	res = drm_connector_init_with_ddc(
6365 			dm->ddev,
6366 			&aconnector->base,
6367 			&amdgpu_dm_connector_funcs,
6368 			connector_type,
6369 			&i2c->base);
6370 
6371 	if (res) {
6372 		DRM_ERROR("connector_init failed\n");
6373 		aconnector->connector_id = -1;
6374 		goto out_free;
6375 	}
6376 
6377 	drm_connector_helper_add(
6378 			&aconnector->base,
6379 			&amdgpu_dm_connector_helper_funcs);
6380 
6381 	amdgpu_dm_connector_init_helper(
6382 		dm,
6383 		aconnector,
6384 		connector_type,
6385 		link,
6386 		link_index);
6387 
6388 	drm_connector_attach_encoder(
6389 		&aconnector->base, &aencoder->base);
6390 
6391 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6392 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6393 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6394 
6395 out_free:
6396 	if (res) {
6397 		kfree(i2c);
6398 		aconnector->i2c = NULL;
6399 	}
6400 	return res;
6401 }
6402 
6403 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6404 {
6405 	switch (adev->mode_info.num_crtc) {
6406 	case 1:
6407 		return 0x1;
6408 	case 2:
6409 		return 0x3;
6410 	case 3:
6411 		return 0x7;
6412 	case 4:
6413 		return 0xf;
6414 	case 5:
6415 		return 0x1f;
6416 	case 6:
6417 	default:
6418 		return 0x3f;
6419 	}
6420 }
6421 
6422 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6423 				  struct amdgpu_encoder *aencoder,
6424 				  uint32_t link_index)
6425 {
6426 	struct amdgpu_device *adev = dev->dev_private;
6427 
6428 	int res = drm_encoder_init(dev,
6429 				   &aencoder->base,
6430 				   &amdgpu_dm_encoder_funcs,
6431 				   DRM_MODE_ENCODER_TMDS,
6432 				   NULL);
6433 
6434 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6435 
6436 	if (!res)
6437 		aencoder->encoder_id = link_index;
6438 	else
6439 		aencoder->encoder_id = -1;
6440 
6441 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6442 
6443 	return res;
6444 }
6445 
6446 static void manage_dm_interrupts(struct amdgpu_device *adev,
6447 				 struct amdgpu_crtc *acrtc,
6448 				 bool enable)
6449 {
6450 	/*
6451 	 * We have no guarantee that the frontend index maps to the same
6452 	 * backend index - some even map to more than one.
6453 	 *
6454 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6455 	 */
6456 	int irq_type =
6457 		amdgpu_display_crtc_idx_to_irq_type(
6458 			adev,
6459 			acrtc->crtc_id);
6460 
6461 	if (enable) {
6462 		drm_crtc_vblank_on(&acrtc->base);
6463 		amdgpu_irq_get(
6464 			adev,
6465 			&adev->pageflip_irq,
6466 			irq_type);
6467 	} else {
6468 
6469 		amdgpu_irq_put(
6470 			adev,
6471 			&adev->pageflip_irq,
6472 			irq_type);
6473 		drm_crtc_vblank_off(&acrtc->base);
6474 	}
6475 }
6476 
6477 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6478 				      struct amdgpu_crtc *acrtc)
6479 {
6480 	int irq_type =
6481 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6482 
6483 	/*
6484 	 * This reads the current state for the IRQ and forces the setting
6485 	 * to be reapplied to hardware.
6486 	 */
6487 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6488 }
6489 
6490 static bool
6491 is_scaling_state_different(const struct dm_connector_state *dm_state,
6492 			   const struct dm_connector_state *old_dm_state)
6493 {
6494 	if (dm_state->scaling != old_dm_state->scaling)
6495 		return true;
6496 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6497 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6498 			return true;
6499 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6500 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6501 			return true;
6502 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6503 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6504 		return true;
6505 	return false;
6506 }
6507 
6508 #ifdef CONFIG_DRM_AMD_DC_HDCP
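/*
 * Decides whether the HDCP state machine must be (re)driven for this
 * connector. Note that the helper rewrites 'state' for the pseudo
 * transitions (an ENABLED -> DESIRED re-enable, the S3 restore case)
 * rather than reporting them as real changes.
 */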
6509 static bool is_content_protection_different(struct drm_connector_state *state,
6510 					    const struct drm_connector_state *old_state,
6511 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6512 {
6513 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6514 
6515 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6516 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6517 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6518 		return true;
6519 	}
6520 
6521 	/* CP is being re-enabled, ignore this */
6522 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6523 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6524 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6525 		return false;
6526 	}
6527 
6528 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6529 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6530 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6531 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6532 
6533 	/* Check if something is connected/enabled; otherwise we would start
6534 	 * hdcp with nothing connected/enabled (hot-plug, headless S3, dpms).
6535 	 */
6536 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6537 	    aconnector->dc_sink != NULL)
6538 		return true;
6539 
6540 	if (old_state->content_protection == state->content_protection)
6541 		return false;
6542 
6543 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6544 		return true;
6545 
6546 	return false;
6547 }
6548 
6549 #endif
6550 static void remove_stream(struct amdgpu_device *adev,
6551 			  struct amdgpu_crtc *acrtc,
6552 			  struct dc_stream_state *stream)
6553 {
6554 	/* this is the update mode case */
6555 
6556 	acrtc->otg_inst = -1;
6557 	acrtc->enabled = false;
6558 }
6559 
6560 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
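/*
 * Computes the DC cursor position from the plane state. A cursor partially
 * off the top/left edge (negative crtc x/y) is expressed by clamping the
 * position to 0 and shifting the hotspot instead, so only the visible part
 * is scanned out.
 */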
6561 			       struct dc_cursor_position *position)
6562 {
6563 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6564 	int x, y;
6565 	int xorigin = 0, yorigin = 0;
6566 
6567 	position->enable = false;
6568 	position->x = 0;
6569 	position->y = 0;
6570 
6571 	if (!crtc || !plane->state->fb)
6572 		return 0;
6573 
6574 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6575 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6576 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6577 			  __func__,
6578 			  plane->state->crtc_w,
6579 			  plane->state->crtc_h);
6580 		return -EINVAL;
6581 	}
6582 
6583 	x = plane->state->crtc_x;
6584 	y = plane->state->crtc_y;
6585 
6586 	if (x <= -amdgpu_crtc->max_cursor_width ||
6587 	    y <= -amdgpu_crtc->max_cursor_height)
6588 		return 0;
6589 
6590 	if (x < 0) {
6591 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6592 		x = 0;
6593 	}
6594 	if (y < 0) {
6595 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6596 		y = 0;
6597 	}
6598 	position->enable = true;
6599 	position->translate_by_source = true;
6600 	position->x = x;
6601 	position->y = y;
6602 	position->x_hotspot = xorigin;
6603 	position->y_hotspot = yorigin;
6604 
6605 	return 0;
6606 }
6607 
6608 static void handle_cursor_update(struct drm_plane *plane,
6609 				 struct drm_plane_state *old_plane_state)
6610 {
6611 	struct amdgpu_device *adev = plane->dev->dev_private;
6612 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6613 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6614 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6615 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6616 	uint64_t address = afb ? afb->address : 0;
6617 	struct dc_cursor_position position;
6618 	struct dc_cursor_attributes attributes;
6619 	int ret;
6620 
6621 	if (!plane->state->fb && !old_plane_state->fb)
6622 		return;
6623 
6624 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d x %d\n",
6625 			 __func__,
6626 			 amdgpu_crtc->crtc_id,
6627 			 plane->state->crtc_w,
6628 			 plane->state->crtc_h);
6629 
6630 	ret = get_cursor_position(plane, crtc, &position);
6631 	if (ret)
6632 		return;
6633 
6634 	if (!position.enable) {
6635 		/* turn off cursor */
6636 		if (crtc_state && crtc_state->stream) {
6637 			mutex_lock(&adev->dm.dc_lock);
6638 			dc_stream_set_cursor_position(crtc_state->stream,
6639 						      &position);
6640 			mutex_unlock(&adev->dm.dc_lock);
6641 		}
6642 		return;
6643 	}
6644 
6645 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6646 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6647 
6648 	memset(&attributes, 0, sizeof(attributes));
6649 	attributes.address.high_part = upper_32_bits(address);
6650 	attributes.address.low_part  = lower_32_bits(address);
6651 	attributes.width             = plane->state->crtc_w;
6652 	attributes.height            = plane->state->crtc_h;
6653 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6654 	attributes.rotation_angle    = 0;
6655 	attributes.attribute_flags.value = 0;
6656 
6657 	attributes.pitch = attributes.width;
6658 
6659 	if (crtc_state->stream) {
6660 		mutex_lock(&adev->dm.dc_lock);
6661 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6662 							 &attributes))
6663 			DRM_ERROR("DC failed to set cursor attributes\n");
6664 
6665 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6666 						   &position))
6667 			DRM_ERROR("DC failed to set cursor position\n");
6668 		mutex_unlock(&adev->dm.dc_lock);
6669 	}
6670 }
6671 
6672 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6673 {
6674 
6675 	assert_spin_locked(&acrtc->base.dev->event_lock);
6676 	WARN_ON(acrtc->event);
6677 
6678 	acrtc->event = acrtc->base.state->event;
6679 
6680 	/* Set the flip status */
6681 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6682 
6683 	/* Mark this event as consumed */
6684 	acrtc->base.state->event = NULL;
6685 
6686 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6687 						 acrtc->crtc_id);
6688 }
6689 
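/*
 * Called once per flip while VRR may be active: feeds the flip into the
 * freesync module, rebuilds the VRR infopacket, and records whether the
 * timing or infopacket changed so the commit code knows a stream update is
 * needed. Runs under event_lock, since vrr_params is shared with the
 * vblank interrupt handlers.
 */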
6690 static void update_freesync_state_on_stream(
6691 	struct amdgpu_display_manager *dm,
6692 	struct dm_crtc_state *new_crtc_state,
6693 	struct dc_stream_state *new_stream,
6694 	struct dc_plane_state *surface,
6695 	u32 flip_timestamp_in_us)
6696 {
6697 	struct mod_vrr_params vrr_params;
6698 	struct dc_info_packet vrr_infopacket = {0};
6699 	struct amdgpu_device *adev = dm->adev;
6700 	unsigned long flags;
6701 
6702 	if (!new_stream)
6703 		return;
6704 
6705 	/*
6706 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6707 	 * For now it's sufficient to just guard against these conditions.
6708 	 */
6709 
6710 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6711 		return;
6712 
6713 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6714 	vrr_params = new_crtc_state->vrr_params;
6715 
6716 	if (surface) {
6717 		mod_freesync_handle_preflip(
6718 			dm->freesync_module,
6719 			surface,
6720 			new_stream,
6721 			flip_timestamp_in_us,
6722 			&vrr_params);
6723 
6724 		if (adev->family < AMDGPU_FAMILY_AI &&
6725 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6726 			mod_freesync_handle_v_update(dm->freesync_module,
6727 						     new_stream, &vrr_params);
6728 
6729 			/* Need to call this before the frame ends. */
6730 			dc_stream_adjust_vmin_vmax(dm->dc,
6731 						   new_crtc_state->stream,
6732 						   &vrr_params.adjust);
6733 		}
6734 	}
6735 
6736 	mod_freesync_build_vrr_infopacket(
6737 		dm->freesync_module,
6738 		new_stream,
6739 		&vrr_params,
6740 		PACKET_TYPE_VRR,
6741 		TRANSFER_FUNC_UNKNOWN,
6742 		&vrr_infopacket);
6743 
6744 	new_crtc_state->freesync_timing_changed |=
6745 		(memcmp(&new_crtc_state->vrr_params.adjust,
6746 			&vrr_params.adjust,
6747 			sizeof(vrr_params.adjust)) != 0);
6748 
6749 	new_crtc_state->freesync_vrr_info_changed |=
6750 		(memcmp(&new_crtc_state->vrr_infopacket,
6751 			&vrr_infopacket,
6752 			sizeof(vrr_infopacket)) != 0);
6753 
6754 	new_crtc_state->vrr_params = vrr_params;
6755 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6756 
6757 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6758 	new_stream->vrr_infopacket = vrr_infopacket;
6759 
6760 	if (new_crtc_state->freesync_vrr_info_changed)
6761 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6762 			      new_crtc_state->base.crtc->base.id,
6763 			      (int)new_crtc_state->base.vrr_enabled,
6764 			      (int)vrr_params.state);
6765 
6766 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6767 }
6768 
6769 static void pre_update_freesync_state_on_stream(
6770 	struct amdgpu_display_manager *dm,
6771 	struct dm_crtc_state *new_crtc_state)
6772 {
6773 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6774 	struct mod_vrr_params vrr_params;
6775 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6776 	struct amdgpu_device *adev = dm->adev;
6777 	unsigned long flags;
6778 
6779 	if (!new_stream)
6780 		return;
6781 
6782 	/*
6783 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6784 	 * For now it's sufficient to just guard against these conditions.
6785 	 */
6786 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6787 		return;
6788 
6789 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6790 	vrr_params = new_crtc_state->vrr_params;
6791 
6792 	if (new_crtc_state->vrr_supported &&
6793 	    config.min_refresh_in_uhz &&
6794 	    config.max_refresh_in_uhz) {
6795 		config.state = new_crtc_state->base.vrr_enabled ?
6796 			VRR_STATE_ACTIVE_VARIABLE :
6797 			VRR_STATE_INACTIVE;
6798 	} else {
6799 		config.state = VRR_STATE_UNSUPPORTED;
6800 	}
6801 
6802 	mod_freesync_build_vrr_params(dm->freesync_module,
6803 				      new_stream,
6804 				      &config, &vrr_params);
6805 
6806 	new_crtc_state->freesync_timing_changed |=
6807 		(memcmp(&new_crtc_state->vrr_params.adjust,
6808 			&vrr_params.adjust,
6809 			sizeof(vrr_params.adjust)) != 0);
6810 
6811 	new_crtc_state->vrr_params = vrr_params;
6812 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6813 }
6814 
6815 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6816 					    struct dm_crtc_state *new_state)
6817 {
6818 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6819 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6820 
6821 	if (!old_vrr_active && new_vrr_active) {
6822 		/* Transition VRR inactive -> active:
6823 		 * While VRR is active, we must not disable vblank irq, as a
6824 		 * reenable after disable would compute bogus vblank/pflip
6825 		 * timestamps if the re-enable happens inside the display front porch.
6826 		 *
6827 		 * We also need vupdate irq for the actual core vblank handling
6828 		 * at end of vblank.
6829 		 */
6830 		dm_set_vupdate_irq(new_state->base.crtc, true);
6831 		drm_crtc_vblank_get(new_state->base.crtc);
6832 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6833 				 __func__, new_state->base.crtc->base.id);
6834 	} else if (old_vrr_active && !new_vrr_active) {
6835 		/* Transition VRR active -> inactive:
6836 		 * Allow vblank irq disable again for fixed refresh rate.
6837 		 */
6838 		dm_set_vupdate_irq(new_state->base.crtc, false);
6839 		drm_crtc_vblank_put(new_state->base.crtc);
6840 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6841 				 __func__, new_state->base.crtc->base.id);
6842 	}
6843 }
6844 
6845 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6846 {
6847 	struct drm_plane *plane;
6848 	struct drm_plane_state *old_plane_state, *new_plane_state;
6849 	int i;
6850 
6851 	/*
6852 	 * TODO: Make this per-stream so we don't issue redundant updates for
6853 	 * commits with multiple streams.
6854 	 */
6855 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6856 				       new_plane_state, i)
6857 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6858 			handle_cursor_update(plane, old_plane_state);
6859 }
6860 
6861 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6862 				    struct dc_state *dc_state,
6863 				    struct drm_device *dev,
6864 				    struct amdgpu_display_manager *dm,
6865 				    struct drm_crtc *pcrtc,
6866 				    bool wait_for_vblank)
6867 {
6868 	uint32_t i;
6869 	uint64_t timestamp_ns;
6870 	struct drm_plane *plane;
6871 	struct drm_plane_state *old_plane_state, *new_plane_state;
6872 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6873 	struct drm_crtc_state *new_pcrtc_state =
6874 			drm_atomic_get_new_crtc_state(state, pcrtc);
6875 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6876 	struct dm_crtc_state *dm_old_crtc_state =
6877 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6878 	int planes_count = 0, vpos, hpos;
6879 	long r;
6880 	unsigned long flags;
6881 	struct amdgpu_bo *abo;
6882 	uint64_t tiling_flags;
6883 	bool tmz_surface = false;
6884 	uint32_t target_vblank, last_flip_vblank;
6885 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6886 	bool pflip_present = false;
6887 	struct {
6888 		struct dc_surface_update surface_updates[MAX_SURFACES];
6889 		struct dc_plane_info plane_infos[MAX_SURFACES];
6890 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6891 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6892 		struct dc_stream_update stream_update;
6893 	} *bundle;
6894 
6895 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6896 
6897 	if (!bundle) {
6898 		dm_error("Failed to allocate update bundle\n");
6899 		goto cleanup;
6900 	}
6901 
6902 	/*
6903 	 * Disable the cursor first if we're disabling all the planes.
6904 	 * It'll remain on the screen after the planes are re-enabled
6905 	 * if we don't.
6906 	 */
6907 	if (acrtc_state->active_planes == 0)
6908 		amdgpu_dm_commit_cursors(state);
6909 
6910 	/* update planes when needed */
6911 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6912 		struct drm_crtc *crtc = new_plane_state->crtc;
6913 		struct drm_crtc_state *new_crtc_state;
6914 		struct drm_framebuffer *fb = new_plane_state->fb;
6915 		bool plane_needs_flip;
6916 		struct dc_plane_state *dc_plane;
6917 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6918 
6919 		/* Cursor plane is handled after stream updates */
6920 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6921 			continue;
6922 
6923 		if (!fb || !crtc || pcrtc != crtc)
6924 			continue;
6925 
6926 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6927 		if (!new_crtc_state->active)
6928 			continue;
6929 
6930 		dc_plane = dm_new_plane_state->dc_state;
6931 
6932 		bundle->surface_updates[planes_count].surface = dc_plane;
6933 		if (new_pcrtc_state->color_mgmt_changed) {
6934 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6935 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6936 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6937 		}
6938 
6939 		fill_dc_scaling_info(new_plane_state,
6940 				     &bundle->scaling_infos[planes_count]);
6941 
6942 		bundle->surface_updates[planes_count].scaling_info =
6943 			&bundle->scaling_infos[planes_count];
6944 
6945 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6946 
6947 		pflip_present = pflip_present || plane_needs_flip;
6948 
6949 		if (!plane_needs_flip) {
6950 			planes_count += 1;
6951 			continue;
6952 		}
6953 
6954 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6955 
6956 		/*
6957 		 * Wait for all fences on this FB. Do limited wait to avoid
6958 		 * deadlock during GPU reset when this fence will not signal
6959 		 * but we hold reservation lock for the BO.
6960 		 */
6961 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6962 							false,
6963 							msecs_to_jiffies(5000));
6964 		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
6966 
		/*
		 * TODO: This reserve can fail, so it is better not to rely on
		 * it and to wait explicitly on the fences instead. Per the
		 * framework helpers, this should also be done for blocking
		 * commits in general.
		 */
6973 		r = amdgpu_bo_reserve(abo, true);
6974 		if (unlikely(r != 0))
6975 			DRM_ERROR("failed to reserve buffer before flip\n");
6976 
6977 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6978 
6979 		tmz_surface = amdgpu_bo_encrypted(abo);
6980 
6981 		amdgpu_bo_unreserve(abo);
6982 
6983 		fill_dc_plane_info_and_addr(
6984 			dm->adev, new_plane_state, tiling_flags,
6985 			&bundle->plane_infos[planes_count],
6986 			&bundle->flip_addrs[planes_count].address,
6987 			tmz_surface,
6988 			false);
6989 
6990 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6991 				 new_plane_state->plane->index,
6992 				 bundle->plane_infos[planes_count].dcc.enable);
6993 
6994 		bundle->surface_updates[planes_count].plane_info =
6995 			&bundle->plane_infos[planes_count];
6996 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
7001 		bundle->flip_addrs[planes_count].flip_immediate =
7002 			crtc->state->async_flip &&
7003 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7004 
7005 		timestamp_ns = ktime_get_ns();
7006 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7007 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7008 		bundle->surface_updates[planes_count].surface = dc_plane;
7009 
7010 		if (!bundle->surface_updates[planes_count].surface) {
7011 			DRM_ERROR("No surface for CRTC: id=%d\n",
7012 					acrtc_attach->crtc_id);
7013 			continue;
7014 		}
7015 
7016 		if (plane == pcrtc->primary)
7017 			update_freesync_state_on_stream(
7018 				dm,
7019 				acrtc_state,
7020 				acrtc_state->stream,
7021 				dc_plane,
7022 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7023 
7024 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7025 				 __func__,
7026 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7027 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7028 
7029 		planes_count += 1;
7030 
7031 	}
7032 
7033 	if (pflip_present) {
7034 		if (!vrr_active) {
7035 			/* Use old throttling in non-vrr fixed refresh rate mode
7036 			 * to keep flip scheduling based on target vblank counts
7037 			 * working in a backwards compatible way, e.g., for
7038 			 * clients using the GLX_OML_sync_control extension or
7039 			 * DRI3/Present extension with defined target_msc.
7040 			 */
7041 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
7044 			/* For variable refresh rate mode only:
7045 			 * Get vblank of last completed flip to avoid > 1 vrr
7046 			 * flips per video frame by use of throttling, but allow
7047 			 * flip programming anywhere in the possibly large
7048 			 * variable vrr vblank interval for fine-grained flip
7049 			 * timing control and more opportunity to avoid stutter
7050 			 * on late submission of flips.
7051 			 */
7052 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7053 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7054 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7055 		}
7056 
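		/*
		 * wait_for_vblank is a bool used here as a 0/1 increment:
		 * when set, target the vblank after the last flip's vblank.
		 */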
7057 		target_vblank = last_flip_vblank + wait_for_vblank;
7058 
7059 		/*
7060 		 * Wait until we're out of the vertical blank period before the one
7061 		 * targeted by the flip
7062 		 */
7063 		while ((acrtc_attach->enabled &&
7064 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7065 							    0, &vpos, &hpos, NULL,
7066 							    NULL, &pcrtc->hwmode)
7067 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7068 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7069 			(int)(target_vblank -
7070 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7071 			usleep_range(1000, 1100);
7072 		}
7073 
7074 		/**
7075 		 * Prepare the flip event for the pageflip interrupt to handle.
7076 		 *
7077 		 * This only works in the case where we've already turned on the
7078 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7079 		 * from 0 -> n planes we have to skip a hardware generated event
7080 		 * and rely on sending it from software.
7081 		 */
7082 		if (acrtc_attach->base.state->event &&
7083 		    acrtc_state->active_planes > 0) {
7084 			drm_crtc_vblank_get(pcrtc);
7085 
7086 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7087 
7088 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7089 			prepare_flip_isr(acrtc_attach);
7090 
7091 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7092 		}
7093 
7094 		if (acrtc_state->stream) {
7095 			if (acrtc_state->freesync_vrr_info_changed)
7096 				bundle->stream_update.vrr_infopacket =
7097 					&acrtc_state->stream->vrr_infopacket;
7098 		}
7099 	}
7100 
7101 	/* Update the planes if changed or disable if we don't have any. */
7102 	if ((planes_count || acrtc_state->active_planes == 0) &&
7103 		acrtc_state->stream) {
7104 		bundle->stream_update.stream = acrtc_state->stream;
7105 		if (new_pcrtc_state->mode_changed) {
7106 			bundle->stream_update.src = acrtc_state->stream->src;
7107 			bundle->stream_update.dst = acrtc_state->stream->dst;
7108 		}
7109 
7110 		if (new_pcrtc_state->color_mgmt_changed) {
7111 			/*
7112 			 * TODO: This isn't fully correct since we've actually
7113 			 * already modified the stream in place.
7114 			 */
7115 			bundle->stream_update.gamut_remap =
7116 				&acrtc_state->stream->gamut_remap_matrix;
7117 			bundle->stream_update.output_csc_transform =
7118 				&acrtc_state->stream->csc_color_matrix;
7119 			bundle->stream_update.out_transfer_func =
7120 				acrtc_state->stream->out_transfer_func;
7121 		}
7122 
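		/* Only include the ABM level in the stream update when it changed. */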
7123 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7124 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7125 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7126 
7127 		/*
7128 		 * If FreeSync state on the stream has changed then we need to
7129 		 * re-adjust the min/max bounds now that DC doesn't handle this
7130 		 * as part of commit.
7131 		 */
7132 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7133 		    amdgpu_dm_vrr_active(acrtc_state)) {
7134 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7135 			dc_stream_adjust_vmin_vmax(
7136 				dm->dc, acrtc_state->stream,
7137 				&acrtc_state->vrr_params.adjust);
7138 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7139 		}
7140 		mutex_lock(&dm->dc_lock);
7141 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7142 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7143 			amdgpu_dm_psr_disable(acrtc_state->stream);
7144 
7145 		dc_commit_updates_for_stream(dm->dc,
7146 						     bundle->surface_updates,
7147 						     planes_count,
7148 						     acrtc_state->stream,
7149 						     &bundle->stream_update,
7150 						     dc_state);
7151 
7152 		/**
7153 		 * Enable or disable the interrupts on the backend.
7154 		 *
7155 		 * Most pipes are put into power gating when unused.
7156 		 *
7157 		 * When power gating is enabled on a pipe we lose the
7158 		 * interrupt enablement state when power gating is disabled.
7159 		 *
7160 		 * So we need to update the IRQ control state in hardware
7161 		 * whenever the pipe turns on (since it could be previously
7162 		 * power gated) or off (since some pipes can't be power gated
7163 		 * on some ASICs).
7164 		 */
7165 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7166 			dm_update_pflip_irq_state(
7167 				(struct amdgpu_device *)dev->dev_private,
7168 				acrtc_attach);
7169 
7170 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7171 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7172 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7173 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7174 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7175 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7176 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7177 			amdgpu_dm_psr_enable(acrtc_state->stream);
7178 		}
7179 
7180 		mutex_unlock(&dm->dc_lock);
7181 	}
7182 
	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled
	 * anyway.
	 */
7188 	if (acrtc_state->active_planes)
7189 		amdgpu_dm_commit_cursors(state);
7190 
7191 cleanup:
7192 	kfree(bundle);
7193 }
7194 
7195 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7196 				   struct drm_atomic_state *state)
7197 {
7198 	struct amdgpu_device *adev = dev->dev_private;
7199 	struct amdgpu_dm_connector *aconnector;
7200 	struct drm_connector *connector;
7201 	struct drm_connector_state *old_con_state, *new_con_state;
7202 	struct drm_crtc_state *new_crtc_state;
7203 	struct dm_crtc_state *new_dm_crtc_state;
7204 	const struct dc_stream_status *status;
7205 	int i, inst;
7206 
	/* Notify audio device removals. */
7208 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7209 		if (old_con_state->crtc != new_con_state->crtc) {
7210 			/* CRTC changes require notification. */
7211 			goto notify;
7212 		}
7213 
7214 		if (!new_con_state->crtc)
7215 			continue;
7216 
7217 		new_crtc_state = drm_atomic_get_new_crtc_state(
7218 			state, new_con_state->crtc);
7219 
7220 		if (!new_crtc_state)
7221 			continue;
7222 
7223 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7224 			continue;
7225 
7226 	notify:
7227 		aconnector = to_amdgpu_dm_connector(connector);
7228 
7229 		mutex_lock(&adev->dm.audio_lock);
7230 		inst = aconnector->audio_inst;
7231 		aconnector->audio_inst = -1;
7232 		mutex_unlock(&adev->dm.audio_lock);
7233 
7234 		amdgpu_dm_audio_eld_notify(adev, inst);
7235 	}
7236 
7237 	/* Notify audio device additions. */
7238 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7239 		if (!new_con_state->crtc)
7240 			continue;
7241 
7242 		new_crtc_state = drm_atomic_get_new_crtc_state(
7243 			state, new_con_state->crtc);
7244 
7245 		if (!new_crtc_state)
7246 			continue;
7247 
7248 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7249 			continue;
7250 
7251 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7252 		if (!new_dm_crtc_state->stream)
7253 			continue;
7254 
7255 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7256 		if (!status)
7257 			continue;
7258 
7259 		aconnector = to_amdgpu_dm_connector(connector);
7260 
7261 		mutex_lock(&adev->dm.audio_lock);
7262 		inst = status->audio_inst;
7263 		aconnector->audio_inst = inst;
7264 		mutex_unlock(&adev->dm.audio_lock);
7265 
7266 		amdgpu_dm_audio_eld_notify(adev, inst);
7267 	}
7268 }
7269 
7270 /*
7271  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7272  * @crtc_state: the DRM CRTC state
7273  * @stream_state: the DC stream state.
7274  *
7275  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7276  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7277  */
7278 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7279 						struct dc_stream_state *stream_state)
7280 {
7281 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7282 }
7283 
7284 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7285 				   struct drm_atomic_state *state,
7286 				   bool nonblock)
7287 {
7288 	struct drm_crtc *crtc;
7289 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7290 	struct amdgpu_device *adev = dev->dev_private;
7291 	int i;
7292 
7293 	/*
7294 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7295 	 * a modeset, being disabled, or have no active planes.
7296 	 *
7297 	 * It's done in atomic commit rather than commit tail for now since
7298 	 * some of these interrupt handlers access the current CRTC state and
7299 	 * potentially the stream pointer itself.
7300 	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, this would lead to the new state (that hasn't been
	 * committed yet) being accessed from within the handlers.
7304 	 *
7305 	 * TODO: Fix this so we can do this in commit tail and not have to block
7306 	 * in atomic check.
7307 	 */
7308 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7309 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7310 
7311 		if (old_crtc_state->active &&
7312 		    (!new_crtc_state->active ||
7313 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7314 			manage_dm_interrupts(adev, acrtc, false);
7315 	}
	/*
	 * Add a check here for SoCs that support a hardware cursor plane, to
	 * unset legacy_cursor_update.
	 */
7320 
	/* TODO: Handle EINTR and re-enable IRQs */
	return drm_atomic_helper_commit(dev, state, nonblock);
}
7325 
7326 /**
7327  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7328  * @state: The atomic state to commit
7329  *
7330  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered out anything non-kosher.
7333  */
7334 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7335 {
7336 	struct drm_device *dev = state->dev;
7337 	struct amdgpu_device *adev = dev->dev_private;
7338 	struct amdgpu_display_manager *dm = &adev->dm;
7339 	struct dm_atomic_state *dm_state;
7340 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7341 	uint32_t i, j;
7342 	struct drm_crtc *crtc;
7343 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7344 	unsigned long flags;
7345 	bool wait_for_vblank = true;
7346 	struct drm_connector *connector;
7347 	struct drm_connector_state *old_con_state, *new_con_state;
7348 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7349 	int crtc_disable_count = 0;
7350 
7351 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7352 
7353 	dm_state = dm_atomic_get_new_state(state);
7354 	if (dm_state && dm_state->context) {
7355 		dc_state = dm_state->context;
7356 	} else {
7357 		/* No state changes, retain current state. */
7358 		dc_state_temp = dc_create_state(dm->dc);
7359 		ASSERT(dc_state_temp);
7360 		dc_state = dc_state_temp;
7361 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7362 	}
7363 
7364 	/* update changed items */
7365 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7366 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7367 
7368 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7369 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7370 
7371 		DRM_DEBUG_DRIVER(
7372 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7373 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7374 			"connectors_changed:%d\n",
7375 			acrtc->crtc_id,
7376 			new_crtc_state->enable,
7377 			new_crtc_state->active,
7378 			new_crtc_state->planes_changed,
7379 			new_crtc_state->mode_changed,
7380 			new_crtc_state->active_changed,
7381 			new_crtc_state->connectors_changed);
7382 
7383 		/* Copy all transient state flags into dc state */
7384 		if (dm_new_crtc_state->stream) {
7385 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7386 							    dm_new_crtc_state->stream);
7387 		}
7388 
		/*
		 * Handles the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
7392 
7393 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7394 
7395 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7396 
7397 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that has in
				 * fact been disconnected, so dc_sink is NULL
				 * on the aconnector. We expect a mode reset to
				 * come soon.
				 *
				 * This can also happen when an unplug occurs
				 * while the resume sequence is still finishing.
				 *
				 * In that case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
7413 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7414 						__func__, acrtc->base.base.id);
7415 				continue;
7416 			}
7417 
7418 			if (dm_old_crtc_state->stream)
7419 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7420 
7421 			pm_runtime_get_noresume(dev->dev);
7422 
7423 			acrtc->enabled = true;
7424 			acrtc->hw_mode = new_crtc_state->mode;
7425 			crtc->hwmode = new_crtc_state->mode;
7426 		} else if (modereset_required(new_crtc_state)) {
7427 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7428 			/* i.e. reset mode */
7429 			if (dm_old_crtc_state->stream) {
7430 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7431 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7432 
7433 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7434 			}
7435 		}
7436 	} /* for_each_crtc_in_state() */
7437 
7438 	if (dc_state) {
7439 		dm_enable_per_frame_crtc_master_sync(dc_state);
7440 		mutex_lock(&dm->dc_lock);
7441 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7442 		mutex_unlock(&dm->dc_lock);
7443 	}
7444 
7445 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7446 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7447 
7448 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7449 
7450 		if (dm_new_crtc_state->stream != NULL) {
7451 			const struct dc_stream_status *status =
7452 					dc_stream_get_status(dm_new_crtc_state->stream);
7453 
7454 			if (!status)
7455 				status = dc_stream_get_status_from_state(dc_state,
7456 									 dm_new_crtc_state->stream);
7457 
7458 			if (!status)
7459 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7460 			else
7461 				acrtc->otg_inst = status->primary_otg_inst;
7462 		}
7463 	}
7464 #ifdef CONFIG_DRM_AMD_DC_HDCP
7465 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7466 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7467 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7468 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7469 
7470 		new_crtc_state = NULL;
7471 
7472 		if (acrtc)
7473 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7474 
7475 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7476 
7477 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7478 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7479 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7480 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7481 			continue;
7482 		}
7483 
7484 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7485 			hdcp_update_display(
7486 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7487 				new_con_state->hdcp_content_type,
7488 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7489 													 : false);
7490 	}
7491 #endif
7492 
7493 	/* Handle connector state changes */
7494 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7495 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7496 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7497 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7498 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7499 		struct dc_stream_update stream_update;
7500 		struct dc_info_packet hdr_packet;
7501 		struct dc_stream_status *status = NULL;
7502 		bool abm_changed, hdr_changed, scaling_changed;
7503 
7504 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7505 		memset(&stream_update, 0, sizeof(stream_update));
7506 
7507 		if (acrtc) {
7508 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7509 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7510 		}
7511 
7512 		/* Skip any modesets/resets */
7513 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7514 			continue;
7515 
7516 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7517 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7518 
7519 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7520 							     dm_old_con_state);
7521 
7522 		abm_changed = dm_new_crtc_state->abm_level !=
7523 			      dm_old_crtc_state->abm_level;
7524 
7525 		hdr_changed =
7526 			is_hdr_metadata_different(old_con_state, new_con_state);
7527 
7528 		if (!scaling_changed && !abm_changed && !hdr_changed)
7529 			continue;
7530 
7531 		stream_update.stream = dm_new_crtc_state->stream;
7532 		if (scaling_changed) {
7533 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7534 					dm_new_con_state, dm_new_crtc_state->stream);
7535 
7536 			stream_update.src = dm_new_crtc_state->stream->src;
7537 			stream_update.dst = dm_new_crtc_state->stream->dst;
7538 		}
7539 
7540 		if (abm_changed) {
7541 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7542 
7543 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7544 		}
7545 
7546 		if (hdr_changed) {
7547 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7548 			stream_update.hdr_static_metadata = &hdr_packet;
7549 		}
7550 
7551 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7552 		WARN_ON(!status);
7553 		WARN_ON(!status->plane_count);
7554 
7555 		/*
7556 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7557 		 * Here we create an empty update on each plane.
7558 		 * To fix this, DC should permit updating only stream properties.
7559 		 */
7560 		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

7564 		mutex_lock(&dm->dc_lock);
7565 		dc_commit_updates_for_stream(dm->dc,
7566 						     dummy_updates,
7567 						     status->plane_count,
7568 						     dm_new_crtc_state->stream,
7569 						     &stream_update,
7570 						     dc_state);
7571 		mutex_unlock(&dm->dc_lock);
7572 	}
7573 
7574 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7575 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7576 				      new_crtc_state, i) {
7577 		if (old_crtc_state->active && !new_crtc_state->active)
7578 			crtc_disable_count++;
7579 
7580 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7581 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7582 
7583 		/* Update freesync active state. */
7584 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7585 
7586 		/* Handle vrr on->off / off->on transitions */
7587 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7588 						dm_new_crtc_state);
7589 	}
7590 
	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This was intentionally deferred until after the front end
	 * state was modified, so that the OTG is on by the time the IRQ
	 * handlers run and they don't access stale or invalid state.
	 */
7597 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7598 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7599 
7600 		if (new_crtc_state->active &&
7601 		    (!old_crtc_state->active ||
7602 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7603 			manage_dm_interrupts(adev, acrtc, true);
7604 #ifdef CONFIG_DEBUG_FS
7605 			/**
7606 			 * Frontend may have changed so reapply the CRC capture
7607 			 * settings for the stream.
7608 			 */
7609 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7610 
7611 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7612 				amdgpu_dm_crtc_configure_crc_source(
7613 					crtc, dm_new_crtc_state,
7614 					dm_new_crtc_state->crc_src);
7615 			}
7616 #endif
7617 		}
7618 	}
7619 
7620 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7621 		if (new_crtc_state->async_flip)
7622 			wait_for_vblank = false;
7623 
	/* Update planes when needed, per CRTC */
7625 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7626 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7627 
7628 		if (dm_new_crtc_state->stream)
7629 			amdgpu_dm_commit_planes(state, dc_state, dev,
7630 						dm, crtc, wait_for_vblank);
7631 	}
7632 
7633 	/* Update audio instances for each connector. */
7634 	amdgpu_dm_commit_audio(dev, state);
7635 
	/*
	 * Send a vblank event for all events not handled in flip and
	 * mark the event as consumed for drm_atomic_helper_commit_hw_done.
	 */
7640 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7641 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7642 
7643 		if (new_crtc_state->event)
7644 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7645 
7646 		new_crtc_state->event = NULL;
7647 	}
7648 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7649 
7650 	/* Signal HW programming completion */
7651 	drm_atomic_helper_commit_hw_done(state);
7652 
7653 	if (wait_for_vblank)
7654 		drm_atomic_helper_wait_for_flip_done(dev, state);
7655 
7656 	drm_atomic_helper_cleanup_planes(dev, state);
7657 
7658 	/*
7659 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7660 	 * so we can put the GPU into runtime suspend if we're not driving any
7661 	 * displays anymore
7662 	 */
7663 	for (i = 0; i < crtc_disable_count; i++)
7664 		pm_runtime_put_autosuspend(dev->dev);
7665 	pm_runtime_mark_last_busy(dev->dev);
7666 
7667 	if (dc_state_temp)
7668 		dc_release_state(dc_state_temp);
7669 }
7670 
7671 
7672 static int dm_force_atomic_commit(struct drm_connector *connector)
7673 {
7674 	int ret = 0;
7675 	struct drm_device *ddev = connector->dev;
7676 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7677 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7678 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7679 	struct drm_connector_state *conn_state;
7680 	struct drm_crtc_state *crtc_state;
7681 	struct drm_plane_state *plane_state;
7682 
7683 	if (!state)
7684 		return -ENOMEM;
7685 
7686 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7687 
	/* Construct an atomic state to restore previous display settings */
7689 
7690 	/*
7691 	 * Attach connectors to drm_atomic_state
7692 	 */
7693 	conn_state = drm_atomic_get_connector_state(state, connector);
7694 
7695 	ret = PTR_ERR_OR_ZERO(conn_state);
7696 	if (ret)
7697 		goto err;
7698 
	/* Attach CRTC to drm_atomic_state */
7700 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7701 
7702 	ret = PTR_ERR_OR_ZERO(crtc_state);
7703 	if (ret)
7704 		goto err;
7705 
7706 	/* force a restore */
7707 	crtc_state->mode_changed = true;
7708 
7709 	/* Attach plane to drm_atomic_state */
7710 	plane_state = drm_atomic_get_plane_state(state, plane);
7711 
7712 	ret = PTR_ERR_OR_ZERO(plane_state);
7713 	if (ret)
		goto err;

7717 	/* Call commit internally with the state we just constructed */
7718 	ret = drm_atomic_commit(state);
7719 	if (!ret)
7720 		return 0;
7721 
7722 err:
7723 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7724 	drm_atomic_state_put(state);
7725 
7726 	return ret;
7727 }
7728 
7729 /*
7730  * This function handles all cases when set mode does not come upon hotplug.
7731  * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
7733  */
7734 void dm_restore_drm_connector_state(struct drm_device *dev,
7735 				    struct drm_connector *connector)
7736 {
7737 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7738 	struct amdgpu_crtc *disconnected_acrtc;
7739 	struct dm_crtc_state *acrtc_state;
7740 
7741 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7742 		return;
7743 
7744 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7745 	if (!disconnected_acrtc)
7746 		return;
7747 
7748 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7749 	if (!acrtc_state->stream)
7750 		return;
7751 
	/*
	 * If the previous sink has not been released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
	 */
7757 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7758 		dm_force_atomic_commit(&aconnector->base);
7759 }
7760 
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
7765 static int do_aquire_global_lock(struct drm_device *dev,
7766 				 struct drm_atomic_state *state)
7767 {
7768 	struct drm_crtc *crtc;
7769 	struct drm_crtc_commit *commit;
7770 	long ret;
7771 
	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we are taking here will
	 * get released too.
	 */
7777 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7778 	if (ret)
7779 		return ret;
7780 
7781 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
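		/*
		 * Take a reference to the first pending commit under the
		 * commit_lock so it cannot be freed while we wait on it.
		 */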
7782 		spin_lock(&crtc->commit_lock);
7783 		commit = list_first_entry_or_null(&crtc->commit_list,
7784 				struct drm_crtc_commit, commit_entry);
7785 		if (commit)
7786 			drm_crtc_commit_get(commit);
7787 		spin_unlock(&crtc->commit_lock);
7788 
7789 		if (!commit)
7790 			continue;
7791 
		/*
		 * Make sure all pending HW programming has completed and
		 * all page flips are done.
		 */
7796 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7797 
7798 		if (ret > 0)
7799 			ret = wait_for_completion_interruptible_timeout(
7800 					&commit->flip_done, 10*HZ);
7801 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
7805 
7806 		drm_crtc_commit_put(commit);
7807 	}
7808 
7809 	return ret < 0 ? ret : 0;
7810 }
7811 
7812 static void get_freesync_config_for_crtc(
7813 	struct dm_crtc_state *new_crtc_state,
7814 	struct dm_connector_state *new_con_state)
7815 {
7816 	struct mod_freesync_config config = {0};
7817 	struct amdgpu_dm_connector *aconnector =
7818 			to_amdgpu_dm_connector(new_con_state->base.connector);
7819 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7820 	int vrefresh = drm_mode_vrefresh(mode);
7821 
7822 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7823 					vrefresh >= aconnector->min_vfreq &&
7824 					vrefresh <= aconnector->max_vfreq;
7825 
7826 	if (new_crtc_state->vrr_supported) {
7827 		new_crtc_state->stream->ignore_msa_timing_param = true;
7828 		config.state = new_crtc_state->base.vrr_enabled ?
7829 				VRR_STATE_ACTIVE_VARIABLE :
7830 				VRR_STATE_INACTIVE;
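		/* mod_freesync expects the refresh bounds in microhertz. */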
7831 		config.min_refresh_in_uhz =
7832 				aconnector->min_vfreq * 1000000;
7833 		config.max_refresh_in_uhz =
7834 				aconnector->max_vfreq * 1000000;
7835 		config.vsif_supported = true;
7836 		config.btr = true;
7837 	}
7838 
7839 	new_crtc_state->freesync_config = config;
7840 }
7841 
7842 static void reset_freesync_config_for_crtc(
7843 	struct dm_crtc_state *new_crtc_state)
7844 {
7845 	new_crtc_state->vrr_supported = false;
7846 
7847 	memset(&new_crtc_state->vrr_params, 0,
7848 	       sizeof(new_crtc_state->vrr_params));
7849 	memset(&new_crtc_state->vrr_infopacket, 0,
7850 	       sizeof(new_crtc_state->vrr_infopacket));
7851 }
7852 
7853 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7854 				struct drm_atomic_state *state,
7855 				struct drm_crtc *crtc,
7856 				struct drm_crtc_state *old_crtc_state,
7857 				struct drm_crtc_state *new_crtc_state,
7858 				bool enable,
7859 				bool *lock_and_validation_needed)
7860 {
7861 	struct dm_atomic_state *dm_state = NULL;
7862 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7863 	struct dc_stream_state *new_stream;
7864 	int ret = 0;
7865 
	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set, and update changed items there.
	 */
7870 	struct amdgpu_crtc *acrtc = NULL;
7871 	struct amdgpu_dm_connector *aconnector = NULL;
7872 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7873 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7874 
7875 	new_stream = NULL;
7876 
7877 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7878 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7879 	acrtc = to_amdgpu_crtc(crtc);
7880 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7881 
7882 	/* TODO This hack should go away */
7883 	if (aconnector && enable) {
7884 		/* Make sure fake sink is created in plug-in scenario */
7885 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7886 							    &aconnector->base);
7887 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7888 							    &aconnector->base);
7889 
7890 		if (IS_ERR(drm_new_conn_state)) {
7891 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7892 			goto fail;
7893 		}
7894 
7895 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7896 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7897 
7898 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7899 			goto skip_modeset;
7900 
7901 		new_stream = create_validate_stream_for_sink(aconnector,
7902 							     &new_crtc_state->mode,
7903 							     dm_new_conn_state,
7904 							     dm_old_crtc_state->stream);
7905 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3. In this case it is not an
		 * error: the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
7912 
7913 		if (!new_stream) {
7914 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7915 					__func__, acrtc->base.base.id);
7916 			ret = -ENOMEM;
7917 			goto fail;
7918 		}
7919 
7920 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7921 
7922 		ret = fill_hdr_info_packet(drm_new_conn_state,
7923 					   &new_stream->hdr_static_metadata);
7924 		if (ret)
7925 			goto fail;
7926 
7927 		/*
7928 		 * If we already removed the old stream from the context
7929 		 * (and set the new stream to NULL) then we can't reuse
7930 		 * the old stream even if the stream and scaling are unchanged.
		 * We would hit the BUG_ON and get a black screen.
7932 		 *
7933 		 * TODO: Refactor this function to allow this check to work
7934 		 * in all conditions.
7935 		 */
7936 		if (dm_new_crtc_state->stream &&
7937 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7938 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7939 			new_crtc_state->mode_changed = false;
7940 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7941 					 new_crtc_state->mode_changed);
7942 		}
7943 	}
7944 
7945 	/* mode_changed flag may get updated above, need to check again */
7946 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7947 		goto skip_modeset;
7948 
7949 	DRM_DEBUG_DRIVER(
7950 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7951 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7952 		"connectors_changed:%d\n",
7953 		acrtc->crtc_id,
7954 		new_crtc_state->enable,
7955 		new_crtc_state->active,
7956 		new_crtc_state->planes_changed,
7957 		new_crtc_state->mode_changed,
7958 		new_crtc_state->active_changed,
7959 		new_crtc_state->connectors_changed);
7960 
7961 	/* Remove stream for any changed/disabled CRTC */
7962 	if (!enable) {
7963 
7964 		if (!dm_old_crtc_state->stream)
7965 			goto skip_modeset;
7966 
7967 		ret = dm_atomic_get_state(state, &dm_state);
7968 		if (ret)
7969 			goto fail;
7970 
7971 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7972 				crtc->base.id);
7973 
7974 		/* i.e. reset mode */
7975 		if (dc_remove_stream_from_ctx(
7976 				dm->dc,
7977 				dm_state->context,
7978 				dm_old_crtc_state->stream) != DC_OK) {
7979 			ret = -EINVAL;
7980 			goto fail;
7981 		}
7982 
7983 		dc_stream_release(dm_old_crtc_state->stream);
7984 		dm_new_crtc_state->stream = NULL;
7985 
7986 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7987 
7988 		*lock_and_validation_needed = true;
7989 
7990 	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when
		 * added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: need to dig out the root cause of this.
		 */
7996 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7997 			goto skip_modeset;
7998 
7999 		if (modereset_required(new_crtc_state))
8000 			goto skip_modeset;
8001 
8002 		if (modeset_required(new_crtc_state, new_stream,
8003 				     dm_old_crtc_state->stream)) {
8004 
8005 			WARN_ON(dm_new_crtc_state->stream);
8006 
8007 			ret = dm_atomic_get_state(state, &dm_state);
8008 			if (ret)
8009 				goto fail;
8010 
8011 			dm_new_crtc_state->stream = new_stream;
8012 
8013 			dc_stream_retain(new_stream);
8014 
8015 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8016 						crtc->base.id);
8017 
8018 			if (dc_add_stream_to_ctx(
8019 					dm->dc,
8020 					dm_state->context,
8021 					dm_new_crtc_state->stream) != DC_OK) {
8022 				ret = -EINVAL;
8023 				goto fail;
8024 			}
8025 
8026 			*lock_and_validation_needed = true;
8027 		}
8028 	}
8029 
8030 skip_modeset:
8031 	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);
8034 
8035 	/*
8036 	 * We want to do dc stream updates that do not require a
8037 	 * full modeset below.
8038 	 */
8039 	if (!(enable && aconnector && new_crtc_state->enable &&
8040 	      new_crtc_state->active))
8041 		return 0;
8042 	/*
8043 	 * Given above conditions, the dc state cannot be NULL because:
8044 	 * 1. We're in the process of enabling CRTCs (just been added
8045 	 *    to the dc context, or already is on the context)
8046 	 * 2. Has a valid connector attached, and
8047 	 * 3. Is currently active and enabled.
8048 	 * => The dc stream state currently exists.
8049 	 */
8050 	BUG_ON(dm_new_crtc_state->stream == NULL);
8051 
8052 	/* Scaling or underscan settings */
8053 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8054 		update_stream_scaling_settings(
8055 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8056 
8057 	/* ABM settings */
8058 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8059 
8060 	/*
8061 	 * Color management settings. We also update color properties
8062 	 * when a modeset is needed, to ensure it gets reprogrammed.
8063 	 */
8064 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8065 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8066 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8067 		if (ret)
8068 			goto fail;
8069 	}
8070 
8071 	/* Update Freesync settings. */
8072 	get_freesync_config_for_crtc(dm_new_crtc_state,
8073 				     dm_new_conn_state);
8074 
8075 	return ret;
8076 
8077 fail:
8078 	if (new_stream)
8079 		dc_stream_release(new_stream);
8080 	return ret;
8081 }
8082 
8083 static bool should_reset_plane(struct drm_atomic_state *state,
8084 			       struct drm_plane *plane,
8085 			       struct drm_plane_state *old_plane_state,
8086 			       struct drm_plane_state *new_plane_state)
8087 {
8088 	struct drm_plane *other;
8089 	struct drm_plane_state *old_other_state, *new_other_state;
8090 	struct drm_crtc_state *new_crtc_state;
8091 	int i;
8092 
8093 	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
8096 	 * the stream.
8097 	 */
8098 	if (state->allow_modeset)
8099 		return true;
8100 
8101 	/* Exit early if we know that we're adding or removing the plane. */
8102 	if (old_plane_state->crtc != new_plane_state->crtc)
8103 		return true;
8104 
8105 	/* old crtc == new_crtc == NULL, plane not in context. */
8106 	if (!new_plane_state->crtc)
8107 		return false;
8108 
8109 	new_crtc_state =
8110 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8111 
8112 	if (!new_crtc_state)
8113 		return true;
8114 
8115 	/* CRTC Degamma changes currently require us to recreate planes. */
8116 	if (new_crtc_state->color_mgmt_changed)
8117 		return true;
8118 
8119 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8120 		return true;
8121 
8122 	/*
8123 	 * If there are any new primary or overlay planes being added or
8124 	 * removed then the z-order can potentially change. To ensure
8125 	 * correct z-order and pipe acquisition the current DC architecture
8126 	 * requires us to remove and recreate all existing planes.
8127 	 *
8128 	 * TODO: Come up with a more elegant solution for this.
8129 	 */
8130 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8131 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8132 			continue;
8133 
8134 		if (old_other_state->crtc != new_plane_state->crtc &&
8135 		    new_other_state->crtc != new_plane_state->crtc)
8136 			continue;
8137 
8138 		if (old_other_state->crtc != new_other_state->crtc)
8139 			return true;
8140 
8141 		/* TODO: Remove this once we can handle fast format changes. */
8142 		if (old_other_state->fb && new_other_state->fb &&
8143 		    old_other_state->fb->format != new_other_state->fb->format)
8144 			return true;
8145 	}
8146 
8147 	return false;
8148 }
8149 
8150 static int dm_update_plane_state(struct dc *dc,
8151 				 struct drm_atomic_state *state,
8152 				 struct drm_plane *plane,
8153 				 struct drm_plane_state *old_plane_state,
8154 				 struct drm_plane_state *new_plane_state,
8155 				 bool enable,
8156 				 bool *lock_and_validation_needed)
8157 {
8158 
8159 	struct dm_atomic_state *dm_state = NULL;
8160 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8161 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8162 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8163 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8164 	struct amdgpu_crtc *new_acrtc;
8165 	bool needs_reset;
8166 	int ret = 0;
8167 
8168 
8169 	new_plane_crtc = new_plane_state->crtc;
8170 	old_plane_crtc = old_plane_state->crtc;
8171 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8172 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8173 
	/* TODO: Implement a better atomic check for the cursor plane */
8175 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8176 		if (!enable || !new_plane_crtc ||
8177 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8178 			return 0;
8179 
8180 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8181 
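		/* Reject cursor sizes larger than the hardware cursor limits. */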
8182 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8183 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8184 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8185 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8186 			return -EINVAL;
8187 		}
8188 
8189 		return 0;
8190 	}
8191 
8192 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8193 					 new_plane_state);
8194 
8195 	/* Remove any changed/removed planes */
8196 	if (!enable) {
8197 		if (!needs_reset)
8198 			return 0;
8199 
8200 		if (!old_plane_crtc)
8201 			return 0;
8202 
8203 		old_crtc_state = drm_atomic_get_old_crtc_state(
8204 				state, old_plane_crtc);
8205 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8206 
8207 		if (!dm_old_crtc_state->stream)
8208 			return 0;
8209 
8210 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8211 				plane->base.id, old_plane_crtc->base.id);
8212 
8213 		ret = dm_atomic_get_state(state, &dm_state);
8214 		if (ret)
8215 			return ret;
8216 
8217 		if (!dc_remove_plane_from_context(
8218 				dc,
8219 				dm_old_crtc_state->stream,
8220 				dm_old_plane_state->dc_state,
8221 				dm_state->context)) {
8222 
			ret = -EINVAL;
8224 			return ret;
8225 		}
8226 
8227 
8228 		dc_plane_state_release(dm_old_plane_state->dc_state);
8229 		dm_new_plane_state->dc_state = NULL;
8230 
8231 		*lock_and_validation_needed = true;
8232 
8233 	} else { /* Add new planes */
8234 		struct dc_plane_state *dc_new_plane_state;
8235 
8236 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8237 			return 0;
8238 
8239 		if (!new_plane_crtc)
8240 			return 0;
8241 
8242 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8243 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8244 
8245 		if (!dm_new_crtc_state->stream)
8246 			return 0;
8247 
8248 		if (!needs_reset)
8249 			return 0;
8250 
8251 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8252 		if (ret)
8253 			return ret;
8254 
8255 		WARN_ON(dm_new_plane_state->dc_state);
8256 
8257 		dc_new_plane_state = dc_create_plane_state(dc);
8258 		if (!dc_new_plane_state)
8259 			return -ENOMEM;
8260 
8261 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8262 				plane->base.id, new_plane_crtc->base.id);
8263 
8264 		ret = fill_dc_plane_attributes(
8265 			new_plane_crtc->dev->dev_private,
8266 			dc_new_plane_state,
8267 			new_plane_state,
8268 			new_crtc_state);
8269 		if (ret) {
8270 			dc_plane_state_release(dc_new_plane_state);
8271 			return ret;
8272 		}
8273 
8274 		ret = dm_atomic_get_state(state, &dm_state);
8275 		if (ret) {
8276 			dc_plane_state_release(dc_new_plane_state);
8277 			return ret;
8278 		}
8279 
8280 		/*
8281 		 * Any atomic check errors that occur after this will
8282 		 * not need a release. The plane state will be attached
8283 		 * to the stream, and therefore part of the atomic
8284 		 * state. It'll be released when the atomic state is
8285 		 * cleaned.
8286 		 */
8287 		if (!dc_add_plane_to_context(
8288 				dc,
8289 				dm_new_crtc_state->stream,
8290 				dc_new_plane_state,
8291 				dm_state->context)) {
8292 
8293 			dc_plane_state_release(dc_new_plane_state);
8294 			return -EINVAL;
8295 		}
8296 
8297 		dm_new_plane_state->dc_state = dc_new_plane_state;
8298 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
8302 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8303 
8304 		*lock_and_validation_needed = true;
8305 	}
8306 
8307 
8308 	return ret;
8309 }
8310 
8311 static int
8312 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8313 				    struct drm_atomic_state *state,
8314 				    enum surface_update_type *out_type)
8315 {
8316 	struct dc *dc = dm->dc;
8317 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8318 	int i, j, num_plane, ret = 0;
8319 	struct drm_plane_state *old_plane_state, *new_plane_state;
8320 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8321 	struct drm_crtc *new_plane_crtc;
8322 	struct drm_plane *plane;
8323 
8324 	struct drm_crtc *crtc;
8325 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8326 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8327 	struct dc_stream_status *status = NULL;
8328 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8329 	struct surface_info_bundle {
8330 		struct dc_surface_update surface_updates[MAX_SURFACES];
8331 		struct dc_plane_info plane_infos[MAX_SURFACES];
8332 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8333 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8334 		struct dc_stream_update stream_update;
8335 	} *bundle;
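
	/*
	 * As in amdgpu_dm_commit_planes(), the bundle is heap-allocated
	 * because it is too large for the kernel stack.
	 */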
8336 
8337 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8338 
8339 	if (!bundle) {
8340 		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC */
8342 		update_type = UPDATE_TYPE_FULL;
8343 		goto cleanup;
8344 	}
8345 
8346 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8347 
8348 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8349 
8350 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8351 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8352 		num_plane = 0;
8353 
8354 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8355 			update_type = UPDATE_TYPE_FULL;
8356 			goto cleanup;
8357 		}
8358 
8359 		if (!new_dm_crtc_state->stream)
8360 			continue;
8361 
8362 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8363 			const struct amdgpu_framebuffer *amdgpu_fb =
8364 				to_amdgpu_framebuffer(new_plane_state->fb);
8365 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8366 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8367 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8368 			uint64_t tiling_flags;
8369 			bool tmz_surface = false;
8370 
8371 			new_plane_crtc = new_plane_state->crtc;
8372 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8373 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8374 
8375 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8376 				continue;
8377 
8378 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8379 				update_type = UPDATE_TYPE_FULL;
8380 				goto cleanup;
8381 			}
8382 
8383 			if (crtc != new_plane_crtc)
8384 				continue;
8385 
8386 			bundle->surface_updates[num_plane].surface =
8387 					new_dm_plane_state->dc_state;
8388 
8389 			if (new_crtc_state->mode_changed) {
8390 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8391 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8392 			}
8393 
8394 			if (new_crtc_state->color_mgmt_changed) {
8395 				bundle->surface_updates[num_plane].gamma =
8396 						new_dm_plane_state->dc_state->gamma_correction;
8397 				bundle->surface_updates[num_plane].in_transfer_func =
8398 						new_dm_plane_state->dc_state->in_transfer_func;
8399 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8400 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8401 				bundle->stream_update.gamut_remap =
8402 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8403 				bundle->stream_update.output_csc_transform =
8404 						&new_dm_crtc_state->stream->csc_color_matrix;
8405 				bundle->stream_update.out_transfer_func =
8406 						new_dm_crtc_state->stream->out_transfer_func;
8407 			}
8408 
8409 			ret = fill_dc_scaling_info(new_plane_state,
8410 						   scaling_info);
8411 			if (ret)
8412 				goto cleanup;
8413 
8414 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8415 
8416 			if (amdgpu_fb) {
8417 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8418 				if (ret)
8419 					goto cleanup;
8420 
8421 				ret = fill_dc_plane_info_and_addr(
8422 					dm->adev, new_plane_state, tiling_flags,
8423 					plane_info,
8424 					&flip_addr->address, tmz_surface,
8425 					false);
8426 				if (ret)
8427 					goto cleanup;
8428 
8429 				bundle->surface_updates[num_plane].plane_info = plane_info;
8430 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8431 			}
8432 
8433 			num_plane++;
8434 		}
8435 
8436 		if (num_plane == 0)
8437 			continue;
8438 
8439 		ret = dm_atomic_get_state(state, &dm_state);
8440 		if (ret)
8441 			goto cleanup;
8442 
8443 		old_dm_state = dm_atomic_get_old_state(state);
8444 		if (!old_dm_state) {
8445 			ret = -EINVAL;
8446 			goto cleanup;
8447 		}
8448 
8449 		status = dc_stream_get_status_from_state(old_dm_state->context,
8450 							 new_dm_crtc_state->stream);
8451 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8452 		/*
8453 		 * TODO: DC modifies the surface during this call so we need
8454 		 * to lock here - find a way to do this without locking.
8455 		 */
8456 		mutex_lock(&dm->dc_lock);
8457 		update_type = dc_check_update_surfaces_for_stream(
8458 				dc,	bundle->surface_updates, num_plane,
8459 				&bundle->stream_update, status);
8460 		mutex_unlock(&dm->dc_lock);
8461 
8462 		if (update_type > UPDATE_TYPE_MED) {
8463 			update_type = UPDATE_TYPE_FULL;
8464 			goto cleanup;
8465 		}
8466 	}
8467 
8468 cleanup:
8469 	kfree(bundle);
8470 
8471 	*out_type = update_type;
8472 	return ret;
8473 }
8474 
8475 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8476 {
8477 	struct drm_connector *connector;
8478 	struct drm_connector_state *conn_state;
8479 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
8482 		if (conn_state->crtc != crtc)
8483 			continue;
8484 
8485 		aconnector = to_amdgpu_dm_connector(connector);
8486 		if (!aconnector->port || !aconnector->mst_port)
8487 			aconnector = NULL;
8488 		else
8489 			break;
8490 	}
8491 
8492 	if (!aconnector)
8493 		return 0;
8494 
8495 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8496 }
8497 
8498 /**
8499  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8500  * @dev: The DRM device
8501  * @state: The atomic state to commit
8502  *
8503  * Validate that the given atomic state is programmable by DC into hardware.
8504  * This involves constructing a &struct dc_state reflecting the new hardware
8505  * state we wish to commit, then querying DC to see if it is programmable. It's
8506  * important not to modify the existing DC state. Otherwise, atomic_check
8507  * may unexpectedly commit hardware changes.
8508  *
8509  * When validating the DC state, it's important that the right locks are
8510  * acquired. For full updates case which removes/adds/updates streams on one
8511  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8512  * that any such full update commit will wait for completion of any outstanding
8513  * flip using DRMs synchronization events. See
8514  * dm_determine_update_type_for_commit()
8515  *
8516  * Note that DM adds the affected connectors for all CRTCs in state, when that
8517  * might not seem necessary. This is because DC stream creation requires the
8518  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8519  * be possible but non-trivial - a possible TODO item.
8520  *
 * Return: 0 on success, or a negative error code if validation failed.
8522  */
8523 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8524 				  struct drm_atomic_state *state)
8525 {
8526 	struct amdgpu_device *adev = dev->dev_private;
8527 	struct dm_atomic_state *dm_state = NULL;
8528 	struct dc *dc = adev->dm.dc;
8529 	struct drm_connector *connector;
8530 	struct drm_connector_state *old_con_state, *new_con_state;
8531 	struct drm_crtc *crtc;
8532 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8533 	struct drm_plane *plane;
8534 	struct drm_plane_state *old_plane_state, *new_plane_state;
8535 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8536 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8537 	enum dc_status status;
8538 	int ret, i;
8539 
8540 	/*
8541 	 * This bool will be set to true for any modeset/reset
8542 	 * or plane update that implies a non-fast surface update.
8543 	 */
8544 	bool lock_and_validation_needed = false;
8545 
8546 	ret = drm_atomic_helper_check_modeset(dev, state);
8547 	if (ret)
8548 		goto fail;
8549 
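	/*
	 * DSC over MST is only available on Navi (DCN2) and newer ASICs;
	 * a modeset there may require recomputing DSC across the topology.
	 */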
8550 	if (adev->asic_type >= CHIP_NAVI10) {
8551 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8552 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8553 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8554 				if (ret)
8555 					goto fail;
8556 			}
8557 		}
8558 	}
8559 
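	/*
	 * A CRTC that needs a modeset, changed color management, or toggled
	 * VRR pulls its connectors and planes into the state so they are
	 * validated together.
	 */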
8560 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8561 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8562 		    !new_crtc_state->color_mgmt_changed &&
8563 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8564 			continue;
8565 
8566 		if (!new_crtc_state->enable)
8567 			continue;
8568 
8569 		ret = drm_atomic_add_affected_connectors(state, crtc);
8570 		if (ret)
8571 			return ret;
8572 
8573 		ret = drm_atomic_add_affected_planes(state, crtc);
8574 		if (ret)
8575 			goto fail;
8576 	}
8577 
8578 	/*
8579 	 * Add all primary and overlay planes on the CRTC to the state
8580 	 * whenever a plane is enabled to maintain correct z-ordering
8581 	 * and to enable fast surface updates.
8582 	 */
8583 	drm_for_each_crtc(crtc, dev) {
8584 		bool modified = false;
8585 
8586 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
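			/*
			 * Cursors are handled through DC stream cursor
			 * attributes rather than as DC planes, so skip them.
			 */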
8587 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8588 				continue;
8589 
8590 			if (new_plane_state->crtc == crtc ||
8591 			    old_plane_state->crtc == crtc) {
8592 				modified = true;
8593 				break;
8594 			}
8595 		}
8596 
8597 		if (!modified)
8598 			continue;
8599 
8600 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8601 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8602 				continue;
8603 
8604 			new_plane_state =
8605 				drm_atomic_get_plane_state(state, plane);
8606 
8607 			if (IS_ERR(new_plane_state)) {
8608 				ret = PTR_ERR(new_plane_state);
8609 				goto fail;
8610 			}
8611 		}
8612 	}
8613 
8614 	/* Remove existing planes if they are modified */
8615 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8616 		ret = dm_update_plane_state(dc, state, plane,
8617 					    old_plane_state,
8618 					    new_plane_state,
8619 					    false,
8620 					    &lock_and_validation_needed);
8621 		if (ret)
8622 			goto fail;
8623 	}
8624 
8625 	/* Disable all CRTCs that require disabling */
8626 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8627 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8628 					   old_crtc_state,
8629 					   new_crtc_state,
8630 					   false,
8631 					   &lock_and_validation_needed);
8632 		if (ret)
8633 			goto fail;
8634 	}
8635 
8636 	/* Enable all CRTCs that require enabling */
8637 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8638 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8639 					   old_crtc_state,
8640 					   new_crtc_state,
8641 					   true,
8642 					   &lock_and_validation_needed);
8643 		if (ret)
8644 			goto fail;
8645 	}
8646 
8647 	/* Add new/modified planes */
8648 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8649 		ret = dm_update_plane_state(dc, state, plane,
8650 					    old_plane_state,
8651 					    new_plane_state,
8652 					    true,
8653 					    &lock_and_validation_needed);
8654 		if (ret)
8655 			goto fail;
8656 	}
8657 
8658 	/* Run this here since we want to validate the streams we created */
8659 	ret = drm_atomic_helper_check_planes(dev, state);
8660 	if (ret)
8661 		goto fail;
8662 
8663 	if (state->legacy_cursor_update) {
8664 		/*
8665 		 * This is a fast cursor update coming from the plane update
8666 		 * helper, check if it can be done asynchronously for better
8667 		 * performance.
8668 		 */
8669 		state->async_update =
8670 			!drm_atomic_helper_async_check(dev, state);
8671 
8672 		/*
8673 		 * Skip the remaining global validation if this is an async
8674 		 * update. Cursor updates can be done without affecting
8675 		 * state or bandwidth calcs and this avoids the performance
8676 		 * penalty of locking the private state object and
8677 		 * allocating a new dc_state.
8678 		 */
8679 		if (state->async_update)
8680 			return 0;
8681 	}
8682 
8683 	/* Check scaling and underscan changes */
8684 	/* TODO: Removed scaling-change validation due to the inability to commit
8685 	 * a new stream into the context without causing a full reset. Need to
8686 	 * decide how to handle this.
8687 	 */
8688 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8689 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8690 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8691 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8692 
8693 		/* Skip any modesets/resets */
8694 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8695 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8696 			continue;
8697 
8698 		/* Skip anything that is not a scaling or underscan change */
8699 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8700 			continue;
8701 
8702 		overall_update_type = UPDATE_TYPE_FULL;
8703 		lock_and_validation_needed = true;
8704 	}
8705 
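	/*
	 * Ask DC how invasive the commit is; anything above UPDATE_TYPE_FAST
	 * needs the global lock and full validation below.
	 */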
8706 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8707 	if (ret)
8708 		goto fail;
8709 
8710 	if (overall_update_type < update_type)
8711 		overall_update_type = update_type;
8712 
8713 	/*
8714 	 * lock_and_validation_needed was an old way to determine if we need to set
8715 	 * the global lock. Leaving it in to check if we broke any corner cases:
8716 	 * lock_and_validation_needed == true implies UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8717 	 * lock_and_validation_needed == false implies UPDATE_TYPE_FAST
8718 	 */
8719 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8720 		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8721 
8722 	if (overall_update_type > UPDATE_TYPE_FAST) {
8723 		ret = dm_atomic_get_state(state, &dm_state);
8724 		if (ret)
8725 			goto fail;
8726 
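		/*
		 * Take the global lock so this full update waits for any
		 * outstanding flips on all CRTCs (see the kerneldoc above).
		 */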
8727 		ret = do_aquire_global_lock(dev, state);
8728 		if (ret)
8729 			goto fail;
8730 
8731 #if defined(CONFIG_DRM_AMD_DC_DCN)
8732 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8733 			goto fail;
8734 
8735 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8736 		if (ret)
8737 			goto fail;
8738 #endif
8739 
8740 		/*
8741 		 * Perform validation of MST topology in the state:
8742 		 * We need to perform MST atomic check before calling
8743 		 * dc_validate_global_state(); otherwise there is a chance of
8744 		 * getting stuck in an infinite loop and eventually hanging.
8745 		 */
8746 		ret = drm_dp_mst_atomic_check(state);
8747 		if (ret)
8748 			goto fail;
8749 		status = dc_validate_global_state(dc, dm_state->context, false);
8750 		if (status != DC_OK) {
8751 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8752 				       dc_status_to_str(status), status);
8753 			ret = -EINVAL;
8754 			goto fail;
8755 		}
8756 	} else {
8757 		/*
8758 		 * the DC context or affect global validation, and can have their
8759 		 * the DC context, affect global validation, and can have their
8760 		 * commit work done in parallel with other commits not touching
8761 		 * the same resource. If we have a new DC context as part of
8762 		 * the DM atomic state from validation we need to free it and
8763 		 * retain the existing one instead.
8764 		 */
8765 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8766 
8767 		new_dm_state = dm_atomic_get_new_state(state);
8768 		old_dm_state = dm_atomic_get_old_state(state);
8769 
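		/* Drop the newly-allocated context and re-reference the existing one. */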
8770 		if (new_dm_state && old_dm_state) {
8771 			if (new_dm_state->context)
8772 				dc_release_state(new_dm_state->context);
8773 
8774 			new_dm_state->context = old_dm_state->context;
8775 
8776 			if (old_dm_state->context)
8777 				dc_retain_state(old_dm_state->context);
8778 		}
8779 	}
8780 
8781 	/* Store the overall update type for use later in atomic check. */
8782 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8783 		struct dm_crtc_state *dm_new_crtc_state =
8784 			to_dm_crtc_state(new_crtc_state);
8785 
8786 		dm_new_crtc_state->update_type = (int)overall_update_type;
8787 	}
8788 
8789 	/* Must be success (ret == 0) at this point */
8790 	WARN_ON(ret);
8791 	return ret;
8792 
8793 fail:
8794 	if (ret == -EDEADLK)
8795 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8796 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8797 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8798 	else
8799 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8800 
8801 	return ret;
8802 }
8803 
8804 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8805 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8806 {
8807 	uint8_t dpcd_data;
8808 	bool capable = false;
8809 
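	/*
	 * Sinks that set DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT
	 * (DPCD 0x007) can ignore the MSA timing parameters, a prerequisite
	 * for seamless refresh-rate changes.
	 */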
8810 	if (amdgpu_dm_connector->dc_link &&
8811 		dm_helpers_dp_read_dpcd(
8812 				NULL,
8813 				amdgpu_dm_connector->dc_link,
8814 				DP_DOWN_STREAM_PORT_COUNT,
8815 				&dpcd_data,
8816 				sizeof(dpcd_data))) {
8817 		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
8818 	}
8819 
8820 	return capable;
8821 }

8822 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8823 					struct edid *edid)
8824 {
8825 	int i;
8826 	bool edid_check_required;
8827 	struct detailed_timing *timing;
8828 	struct detailed_non_pixel *data;
8829 	struct detailed_data_monitor_range *range;
8830 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8831 			to_amdgpu_dm_connector(connector);
8832 	struct dm_connector_state *dm_con_state = NULL;
8833 
8834 	struct drm_device *dev = connector->dev;
8835 	struct amdgpu_device *adev = dev->dev_private;
8836 	bool freesync_capable = false;
8837 
8838 	if (!connector->state) {
8839 		DRM_ERROR("%s - Connector has no state\n", __func__);
8840 		goto update;
8841 	}
8842 
8843 	if (!edid) {
8844 		dm_con_state = to_dm_connector_state(connector->state);
8845 
8846 		amdgpu_dm_connector->min_vfreq = 0;
8847 		amdgpu_dm_connector->max_vfreq = 0;
8848 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8849 
8850 		goto update;
8851 	}
8852 
8853 	dm_con_state = to_dm_connector_state(connector->state);
8854 
8855 	edid_check_required = false;
8856 	if (!amdgpu_dm_connector->dc_sink) {
8857 		DRM_ERROR("dc_sink is NULL, could not add the FreeSync module.\n");
8858 		goto update;
8859 	}
8860 	if (!adev->dm.freesync_module)
8861 		goto update;
8862 	/*
8863 	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP only.
8864 	 */
8865 	if (edid) {
8866 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8867 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8868 			edid_check_required = is_dp_capable_without_timing_msa(
8869 						adev->dm.dc,
8870 						amdgpu_dm_connector);
8871 		}
8872 	}
8873 	if (edid_check_required && (edid->version > 1 ||
8874 	    (edid->version == 1 && edid->revision > 1))) {
8875 		for (i = 0; i < 4; i++) {
8876 
8877 			timing	= &edid->detailed_timings[i];
8878 			data	= &timing->data.other_data;
8879 			range	= &data->data.range;
8880 			/*
8881 			 * Check if monitor has continuous frequency mode
8882 			 */
8883 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8884 				continue;
8885 			/*
8886 			 * Check for the range-limits flag only. If flags == 1,
8887 			 * no additional timing information is provided.
8888 			 * Default GTF, GTF secondary curve and CVT are not
8889 			 * supported.
8890 			 */
8891 			if (range->flags != 1)
8892 				continue;
8893 
8894 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8895 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8896 			amdgpu_dm_connector->pixel_clock_mhz =
8897 				range->pixel_clock_mhz * 10;
8898 			break;
8899 		}
8900 
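		/* Only advertise FreeSync when the VRR window exceeds 10 Hz. */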
8901 		if (amdgpu_dm_connector->max_vfreq -
8902 		    amdgpu_dm_connector->min_vfreq > 10)
8903 			freesync_capable = true;
8906 	}
8907 
8908 update:
8909 	if (dm_con_state)
8910 		dm_con_state->freesync_capable = freesync_capable;
8911 
8912 	if (connector->vrr_capable_property)
8913 		drm_connector_set_vrr_capable_property(connector,
8914 						       freesync_capable);
8915 }
8916 
8917 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8918 {
8919 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8920 
8921 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8922 		return;
8923 	if (link->type == dc_connection_none)
8924 		return;
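	/* DP_PSR_SUPPORT (DPCD 0x070) reports the sink's PSR version; 0 means unsupported. */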
8925 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8926 					dpcd_data, sizeof(dpcd_data))) {
8927 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8928 
8929 		if (dpcd_data[0] == 0) {
8930 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8931 			link->psr_settings.psr_feature_enabled = false;
8932 		} else {
8933 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
8934 			link->psr_settings.psr_feature_enabled = true;
8935 		}
8936 
8937 		DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
8938 	}
8939 }
8940 
8941 /*
8942  * amdgpu_dm_link_setup_psr() - configure psr link
8943  * @stream: stream state
8944  *
8945  * Return: true on success
8946  */
8947 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8948 {
8949 	struct dc_link *link = NULL;
8950 	struct psr_config psr_config = {0};
8951 	struct psr_context psr_context = {0};
8952 	bool ret = false;
8953 
8954 	if (stream == NULL)
8955 		return false;
8956 
8957 	link = stream->link;
8958 
8959 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8960 
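	/*
	 * Only program PSR on the link when the sink reported a PSR version.
	 * The setup/deadline values below are fixed parameters passed
	 * through to DC.
	 */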
8961 	if (psr_config.psr_version > 0) {
8962 		psr_config.psr_exit_link_training_required = 0x1;
8963 		psr_config.psr_frame_capture_indication_req = 0;
8964 		psr_config.psr_rfb_setup_time = 0x37;
8965 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8966 		psr_config.allow_smu_optimizations = 0x0;
8967 
8968 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8970 	}
8971 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8972 
8973 	return ret;
8974 }
8975 
8976 /*
8977  * amdgpu_dm_psr_enable() - enable psr f/w
8978  * @stream: stream state
8979  *
8980  * Return: true on success
8981  */
8982 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8983 {
8984 	struct dc_link *link = stream->link;
8985 	unsigned int vsync_rate_hz = 0;
8986 	struct dc_static_screen_params params = {0};
8987 	/*
8988 	 * Calculate the number of static frames before generating an interrupt
8989 	 * to enter PSR; initialize with a fail-safe of 2 static frames.
8990 	 */
8991 	unsigned int num_frames_static = 2;
8992 
8993 	DRM_DEBUG_DRIVER("Enabling psr...\n");
8994 
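	/*
	 * Nominal refresh rate: pix_clk_100hz is in units of 100 Hz, so
	 * vsync_rate_hz = (pix_clk_100hz * 100) / (v_total * h_total).
	 */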
8995 	vsync_rate_hz = div64_u64(div64_u64(
8996 			stream->timing.pix_clk_100hz * 100,
8997 			stream->timing.v_total),
8998 			stream->timing.h_total);
8999 
9000 	/*
9001 	 * Round up: calculate the number of frames such that at least 30 ms
9002 	 * of time has passed.
9003 	 */
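	/*
	 * For example, at 60 Hz: frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 (about 33 ms).
	 */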
9004 	if (vsync_rate_hz != 0) {
9005 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9006 		num_frames_static = (30000 / frame_time_microsec) + 1;
9007 	}
9008 
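	/*
	 * Cursor, overlay and surface updates all count as screen activity;
	 * PSR is eligible again once num_frames_static frames pass without
	 * any of them.
	 */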
9009 	params.triggers.cursor_update = true;
9010 	params.triggers.overlay_update = true;
9011 	params.triggers.surface_update = true;
9012 	params.num_frames = num_frames_static;
9013 
9014 	dc_stream_set_static_screen_params(link->ctx->dc,
9015 					   &stream, 1,
9016 					   &params);
9017 
9018 	return dc_link_set_psr_allow_active(link, true, false);
9019 }
9020 
9021 /*
9022  * amdgpu_dm_psr_disable() - disable psr f/w
9023  * @stream:  stream state
9024  *
9025  * Return: true on success
9026  */
9027 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9028 {
9029 
9031 
9032 	return dc_link_set_psr_allow_active(stream->link, false, true);
9033 }
9034