xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision d9afbb3509900a953f5cf90bc57e793ee80c1108)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 
98 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100 
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103 
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106 
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
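
/*
 * Illustrative layout sketch (inferred from the offset math in
 * dm_dmub_hw_init() below, not an authoritative spec): starting at
 * ucode_array_offset_bytes, a PSP-wrapped DMUB image looks like
 *
 *   [ PSP header (0x100) | inst const payload | PSP footer (0x100) | bss/data ]
 *
 * where hdr->inst_const_bytes spans the first three parts, so the usable
 * payload size is inst_const_bytes - PSP_HEADER_BYTES - PSP_FOOTER_BYTES.
 */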
109 
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
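
/*
 * Illustrative sketch only (assumed flow, simplified): a userspace atomic
 * commit travels through the DM roughly as
 *
 *	drm_mode_atomic_ioctl()
 *	  -> amdgpu_dm_atomic_commit()
 *	       -> amdgpu_dm_atomic_commit_tail()
 *	            -> dc_commit_state() / dc_commit_updates_for_stream()
 *
 * with the DM translating drm_crtc/drm_plane/drm_connector state into
 * dc_stream_state and dc_plane_state requests for DC.
 */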
119 
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123 
124 /*
125  * Initializes drm_device display-related structures, based on the information
126  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
127  * drm_encoder, drm_mode_config
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134 
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 				struct drm_plane *plane,
137 				unsigned long possible_crtcs,
138 				const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 			       struct drm_plane *plane,
141 			       uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
144 				    uint32_t link_index,
145 				    struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 				  struct amdgpu_encoder *aencoder,
148 				  uint32_t link_index);
149 
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151 
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 				   struct drm_atomic_state *state,
154 				   bool nonblock);
155 
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157 
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 				  struct drm_atomic_state *state);
160 
161 static void handle_cursor_update(struct drm_plane *plane,
162 				 struct drm_plane_state *old_plane_state);
163 
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168 
169 
170 /**
171  * dm_vblank_get_counter() - Get counter for number of vertical blanks
172  *
173  * @adev: desired amdgpu device
174  * @crtc: index of the CRTC to get the counter from
175  *
176  * Gets a counter for the number of vertical blanks that have
177  * occurred on the given CRTC.
178  *
179  * Return: counter for vertical blanks on the CRTC. Returns 0 if the
180  * CRTC index is out of range or if no dc_stream_state is currently
181  * attached to the CRTC.
182  */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185 	if (crtc >= adev->mode_info.num_crtc)
186 		return 0;
187 	else {
188 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
190 				acrtc->base.state);
191 
192 
193 		if (acrtc_state->stream == NULL) {
194 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195 				  crtc);
196 			return 0;
197 		}
198 
199 		return dc_stream_get_vblank_counter(acrtc_state->stream);
200 	}
201 }
202 
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 				  u32 *vbl, u32 *position)
205 {
206 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
207 
208 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209 		return -EINVAL;
210 	else {
211 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213 						acrtc->base.state);
214 
215 		if (acrtc_state->stream == NULL) {
216 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217 				  crtc);
218 			return 0;
219 		}
220 
221 		/*
222 		 * TODO rework base driver to use values directly.
223 		 * for now parse it back into reg-format
224 		 */
225 		dc_stream_get_scanoutpos(acrtc_state->stream,
226 					 &v_blank_start,
227 					 &v_blank_end,
228 					 &h_position,
229 					 &v_position);
230 
231 		*position = v_position | (h_position << 16);
232 		*vbl = v_blank_start | (v_blank_end << 16);
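		/* Example with hypothetical values: h_position = 0x320 and
		 * v_position = 0x1C2 pack into *position == 0x032001C2.
		 */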
233 	}
234 
235 	return 0;
236 }
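
/*
 * Sketch (assumption about how the base driver consumes the two helpers
 * above; the actual funcs table lives elsewhere in this file):
 *
 *	static const struct amdgpu_display_funcs dm_display_funcs = {
 *		.vblank_get_counter = dm_vblank_get_counter,
 *		.page_flip_get_scanoutpos = dm_crtc_get_scanoutpos,
 *		...
 *	};
 */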
237 
238 static bool dm_is_idle(void *handle)
239 {
240 	/* XXX todo */
241 	return true;
242 }
243 
244 static int dm_wait_for_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return 0;
248 }
249 
250 static bool dm_check_soft_reset(void *handle)
251 {
252 	return false;
253 }
254 
255 static int dm_soft_reset(void *handle)
256 {
257 	/* XXX todo */
258 	return 0;
259 }
260 
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263 		     int otg_inst)
264 {
265 	struct drm_device *dev = adev->ddev;
266 	struct drm_crtc *crtc;
267 	struct amdgpu_crtc *amdgpu_crtc;
268 
269 	if (otg_inst == -1) {
270 		WARN_ON(1);
271 		return adev->mode_info.crtcs[0];
272 	}
273 
274 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 		amdgpu_crtc = to_amdgpu_crtc(crtc);
276 
277 		if (amdgpu_crtc->otg_inst == otg_inst)
278 			return amdgpu_crtc;
279 	}
280 
281 	return NULL;
282 }
283 
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289 
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
292  * @interrupt_params: ignored
293  * @interrupt_params: interrupt parameters, used to find the CRTC that flipped
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
297 static void dm_pflip_high_irq(void *interrupt_params)
298 {
299 	struct amdgpu_crtc *amdgpu_crtc;
300 	struct common_irq_params *irq_params = interrupt_params;
301 	struct amdgpu_device *adev = irq_params->adev;
302 	unsigned long flags;
303 	struct drm_pending_vblank_event *e;
304 	struct dm_crtc_state *acrtc_state;
305 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
306 	bool vrr_active;
307 
308 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
309 
310 	/* The IRQ can fire during the initial bring-up stage, before the CRTC exists */
311 	/* TODO: work and BO cleanup */
312 	if (amdgpu_crtc == NULL) {
313 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
314 		return;
315 	}
316 
317 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
318 
319 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
320 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
321 						 amdgpu_crtc->pflip_status,
322 						 AMDGPU_FLIP_SUBMITTED,
323 						 amdgpu_crtc->crtc_id,
324 						 amdgpu_crtc);
325 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
326 		return;
327 	}
328 
329 	/* page flip completed. */
330 	e = amdgpu_crtc->event;
331 	amdgpu_crtc->event = NULL;
332 
333 	if (!e)
334 		WARN_ON(1);
335 
336 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
338 
339 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
340 	if (!vrr_active ||
341 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 				      &v_blank_end, &hpos, &vpos) ||
343 	    (vpos < v_blank_start)) {
344 		/* Update to correct count and vblank timestamp if racing with
345 		 * vblank irq. This also updates to the correct vblank timestamp
346 		 * even in VRR mode, as scanout is past the front-porch atm.
347 		 */
348 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
349 
350 		/* Wake up userspace by sending the pageflip event with proper
351 		 * count and timestamp of vblank of flip completion.
352 		 */
353 		if (e) {
354 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
355 
356 			/* Event sent, so done with vblank for this flip */
357 			drm_crtc_vblank_put(&amdgpu_crtc->base);
358 		}
359 	} else if (e) {
360 		/* VRR active and inside front-porch: vblank count and
361 		 * timestamp for pageflip event will only be up to date after
362 		 * drm_crtc_handle_vblank() has been executed from late vblank
363 		 * irq handler after start of back-porch (vline 0). We queue the
364 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 		 * updated timestamp and count, once it runs after us.
366 		 *
367 		 * We need to open-code this instead of using the helper
368 		 * drm_crtc_arm_vblank_event(), as that helper would
369 		 * call drm_crtc_accurate_vblank_count(), which we must
370 		 * not call in VRR mode while we are in front-porch!
371 		 */
372 
373 		/* sequence will be replaced by real count during send-out. */
374 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 		e->pipe = amdgpu_crtc->crtc_id;
376 
377 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
378 		e = NULL;
379 	}
380 
381 	/* Keep track of the vblank of this flip for flip throttling. We use
382 	 * the cooked hw counter, as it is incremented at the start of this
383 	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
384 	 * count for queueing new pageflips if vsync + VRR is enabled.
385 	 */
386 	amdgpu_crtc->last_flip_vblank =
387 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
388 
389 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
391 
392 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 			 vrr_active, (int) !e);
395 }
396 
397 static void dm_vupdate_high_irq(void *interrupt_params)
398 {
399 	struct common_irq_params *irq_params = interrupt_params;
400 	struct amdgpu_device *adev = irq_params->adev;
401 	struct amdgpu_crtc *acrtc;
402 	struct dm_crtc_state *acrtc_state;
403 	unsigned long flags;
404 
405 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
406 
407 	if (acrtc) {
408 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
409 
410 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
411 			      acrtc->crtc_id,
412 			      amdgpu_dm_vrr_active(acrtc_state));
413 
414 		/* Core vblank handling is done here after the end of front-porch
415 		 * in vrr mode, as vblank timestamping only gives valid results
416 		 * once scanout has moved past the front-porch. This also delivers
417 		 * any page-flip completion events that were queued to us
418 		 * if a pageflip happened inside the front-porch.
419 		 */
420 		if (amdgpu_dm_vrr_active(acrtc_state)) {
421 			drm_crtc_handle_vblank(&acrtc->base);
422 
423 			/* BTR processing for pre-DCE12 ASICs */
424 			if (acrtc_state->stream &&
425 			    adev->family < AMDGPU_FAMILY_AI) {
426 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
427 				mod_freesync_handle_v_update(
428 				    adev->dm.freesync_module,
429 				    acrtc_state->stream,
430 				    &acrtc_state->vrr_params);
431 
432 				dc_stream_adjust_vmin_vmax(
433 				    adev->dm.dc,
434 				    acrtc_state->stream,
435 				    &acrtc_state->vrr_params.adjust);
436 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
437 			}
438 		}
439 	}
440 }
441 
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
444  * @interrupt_params: used for determining the CRTC instance
445  *
446  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
447  * event handler.
448  */
449 static void dm_crtc_high_irq(void *interrupt_params)
450 {
451 	struct common_irq_params *irq_params = interrupt_params;
452 	struct amdgpu_device *adev = irq_params->adev;
453 	struct amdgpu_crtc *acrtc;
454 	struct dm_crtc_state *acrtc_state;
455 	unsigned long flags;
456 
457 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
458 	if (!acrtc)
459 		return;
460 
461 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
462 
463 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
464 			 amdgpu_dm_vrr_active(acrtc_state),
465 			 acrtc_state->active_planes);
466 
467 	/*
468 	 * Core vblank handling at the start of front-porch is only possible
469 	 * in non-vrr mode, as only then vblank timestamping gives valid
470 	 * results while still inside the front-porch. Otherwise defer it
471 	 * to dm_vupdate_high_irq after the end of front-porch.
472 	 */
473 	if (!amdgpu_dm_vrr_active(acrtc_state))
474 		drm_crtc_handle_vblank(&acrtc->base);
475 
476 	/*
477 	 * The following must happen at the start of vblank, for CRC
478 	 * computation and below-the-range (BTR) support in vrr mode.
479 	 */
480 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
481 
482 	/* BTR updates need to happen before VUPDATE on Vega and above. */
483 	if (adev->family < AMDGPU_FAMILY_AI)
484 		return;
485 
486 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
487 
488 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
489 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
490 		mod_freesync_handle_v_update(adev->dm.freesync_module,
491 					     acrtc_state->stream,
492 					     &acrtc_state->vrr_params);
493 
494 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
495 					   &acrtc_state->vrr_params.adjust);
496 	}
497 
498 	/*
499 	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
500 	 * In that case, pageflip completion interrupts won't fire and pageflip
501 	 * completion events won't get delivered. Prevent this by sending
502 	 * pending pageflip events from here if a flip is still pending.
503 	 *
504 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
505 	 * avoid race conditions between flip programming and completion,
506 	 * which could cause too early flip completion events.
507 	 */
508 	if (adev->family >= AMDGPU_FAMILY_RV &&
509 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
510 	    acrtc_state->active_planes == 0) {
511 		if (acrtc->event) {
512 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
513 			acrtc->event = NULL;
514 			drm_crtc_vblank_put(&acrtc->base);
515 		}
516 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
517 	}
518 
519 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
520 }
521 
522 static int dm_set_clockgating_state(void *handle,
523 		  enum amd_clockgating_state state)
524 {
525 	return 0;
526 }
527 
528 static int dm_set_powergating_state(void *handle,
529 		  enum amd_powergating_state state)
530 {
531 	return 0;
532 }
533 
534 /* Prototypes of private functions */
535 static int dm_early_init(void *handle);
536 
537 /* Allocate memory for FBC compressed data */
538 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
539 {
540 	struct drm_device *dev = connector->dev;
541 	struct amdgpu_device *adev = dev->dev_private;
542 	struct dm_comressor_info *compressor = &adev->dm.compressor;
543 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
544 	struct drm_display_mode *mode;
545 	unsigned long max_size = 0;
546 
547 	if (adev->dm.dc->fbc_compressor == NULL)
548 		return;
549 
550 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
551 		return;
552 
553 	if (compressor->bo_ptr)
554 		return;
555 
556 
557 	list_for_each_entry(mode, &connector->modes, head) {
558 		if (max_size < mode->htotal * mode->vtotal)
559 			max_size = mode->htotal * mode->vtotal;
560 	}
561 
562 	if (max_size) {
563 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
564 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
565 			    &compressor->gpu_addr, &compressor->cpu_addr);
566 
567 		if (r)
568 			DRM_ERROR("DM: Failed to initialize FBC\n");
569 		else {
570 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
571 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
572 		}
573 
574 	}
575 
576 }
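
/*
 * Rough sizing example (hypothetical mode, not from the code): a
 * 1920x1080 eDP panel might have htotal * vtotal = 2200 * 1125 =
 * 2475000, so the buffer allocated above would be 2475000 * 4 bytes,
 * roughly 9.4 MiB.
 */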
577 
578 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
579 					  int pipe, bool *enabled,
580 					  unsigned char *buf, int max_bytes)
581 {
582 	struct drm_device *dev = dev_get_drvdata(kdev);
583 	struct amdgpu_device *adev = dev->dev_private;
584 	struct drm_connector *connector;
585 	struct drm_connector_list_iter conn_iter;
586 	struct amdgpu_dm_connector *aconnector;
587 	int ret = 0;
588 
589 	*enabled = false;
590 
591 	mutex_lock(&adev->dm.audio_lock);
592 
593 	drm_connector_list_iter_begin(dev, &conn_iter);
594 	drm_for_each_connector_iter(connector, &conn_iter) {
595 		aconnector = to_amdgpu_dm_connector(connector);
596 		if (aconnector->audio_inst != port)
597 			continue;
598 
599 		*enabled = true;
600 		ret = drm_eld_size(connector->eld);
601 		memcpy(buf, connector->eld, min(max_bytes, ret));
602 
603 		break;
604 	}
605 	drm_connector_list_iter_end(&conn_iter);
606 
607 	mutex_unlock(&adev->dm.audio_lock);
608 
609 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
610 
611 	return ret;
612 }
613 
614 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
615 	.get_eld = amdgpu_dm_audio_component_get_eld,
616 };
617 
618 static int amdgpu_dm_audio_component_bind(struct device *kdev,
619 				       struct device *hda_kdev, void *data)
620 {
621 	struct drm_device *dev = dev_get_drvdata(kdev);
622 	struct amdgpu_device *adev = dev->dev_private;
623 	struct drm_audio_component *acomp = data;
624 
625 	acomp->ops = &amdgpu_dm_audio_component_ops;
626 	acomp->dev = kdev;
627 	adev->dm.audio_component = acomp;
628 
629 	return 0;
630 }
631 
632 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
633 					  struct device *hda_kdev, void *data)
634 {
635 	struct drm_device *dev = dev_get_drvdata(kdev);
636 	struct amdgpu_device *adev = dev->dev_private;
637 	struct drm_audio_component *acomp = data;
638 
639 	acomp->ops = NULL;
640 	acomp->dev = NULL;
641 	adev->dm.audio_component = NULL;
642 }
643 
644 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
645 	.bind	= amdgpu_dm_audio_component_bind,
646 	.unbind	= amdgpu_dm_audio_component_unbind,
647 };
648 
649 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
650 {
651 	int i, ret;
652 
653 	if (!amdgpu_audio)
654 		return 0;
655 
656 	adev->mode_info.audio.enabled = true;
657 
658 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
659 
660 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
661 		adev->mode_info.audio.pin[i].channels = -1;
662 		adev->mode_info.audio.pin[i].rate = -1;
663 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
664 		adev->mode_info.audio.pin[i].status_bits = 0;
665 		adev->mode_info.audio.pin[i].category_code = 0;
666 		adev->mode_info.audio.pin[i].connected = false;
667 		adev->mode_info.audio.pin[i].id =
668 			adev->dm.dc->res_pool->audios[i]->inst;
669 		adev->mode_info.audio.pin[i].offset = 0;
670 	}
671 
672 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
673 	if (ret < 0)
674 		return ret;
675 
676 	adev->dm.audio_registered = true;
677 
678 	return 0;
679 }
680 
681 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
682 {
683 	if (!amdgpu_audio)
684 		return;
685 
686 	if (!adev->mode_info.audio.enabled)
687 		return;
688 
689 	if (adev->dm.audio_registered) {
690 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
691 		adev->dm.audio_registered = false;
692 	}
693 
694 	/* TODO: Disable audio? */
695 
696 	adev->mode_info.audio.enabled = false;
697 }
698 
699 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
700 {
701 	struct drm_audio_component *acomp = adev->dm.audio_component;
702 
703 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
704 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
705 
706 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
707 						 pin, -1);
708 	}
709 }
710 
711 static int dm_dmub_hw_init(struct amdgpu_device *adev)
712 {
713 	const struct dmcub_firmware_header_v1_0 *hdr;
714 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
715 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
716 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
717 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
718 	struct abm *abm = adev->dm.dc->res_pool->abm;
719 	struct dmub_srv_hw_params hw_params;
720 	enum dmub_status status;
721 	const unsigned char *fw_inst_const, *fw_bss_data;
722 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
723 	bool has_hw_support;
724 
725 	if (!dmub_srv)
726 		/* DMUB isn't supported on the ASIC. */
727 		return 0;
728 
729 	if (!fb_info) {
730 		DRM_ERROR("No framebuffer info for DMUB service.\n");
731 		return -EINVAL;
732 	}
733 
734 	if (!dmub_fw) {
735 		/* Firmware required for DMUB support. */
736 		DRM_ERROR("No firmware provided for DMUB.\n");
737 		return -EINVAL;
738 	}
739 
740 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
741 	if (status != DMUB_STATUS_OK) {
742 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
743 		return -EINVAL;
744 	}
745 
746 	if (!has_hw_support) {
747 		DRM_INFO("DMUB unsupported on ASIC\n");
748 		return 0;
749 	}
750 
751 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
752 
753 	fw_inst_const = dmub_fw->data +
754 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
755 			PSP_HEADER_BYTES;
756 
757 	fw_bss_data = dmub_fw->data +
758 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
759 		      le32_to_cpu(hdr->inst_const_bytes);
760 
761 	/* Copy firmware and bios info into FB memory. */
762 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
763 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
764 
765 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
766 
767 	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
768 	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of
769 	 * the dmub firmware to cw0; otherwise, dm_dmub_hw_init performs
770 	 * the firmware backdoor load here.
771 	 */
772 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
773 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
774 				fw_inst_const_size);
775 	}
776 
777 	if (fw_bss_data_size)
778 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
779 		       fw_bss_data, fw_bss_data_size);
780 
781 	/* Copy firmware bios info into FB memory. */
782 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
783 	       adev->bios_size);
784 
785 	/* Reset regions that need to be reset. */
786 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
787 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
788 
789 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
790 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
791 
792 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
793 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
794 
795 	/* Initialize hardware. */
796 	memset(&hw_params, 0, sizeof(hw_params));
797 	hw_params.fb_base = adev->gmc.fb_start;
798 	hw_params.fb_offset = adev->gmc.aper_base;
799 
800 	/* backdoor load firmware and trigger dmub running */
801 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
802 		hw_params.load_inst_const = true;
803 
804 	if (dmcu)
805 		hw_params.psp_version = dmcu->psp_version;
806 
807 	for (i = 0; i < fb_info->num_fb; ++i)
808 		hw_params.fb[i] = &fb_info->fb[i];
809 
810 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
811 	if (status != DMUB_STATUS_OK) {
812 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
813 		return -EINVAL;
814 	}
815 
816 	/* Wait for firmware load to finish. */
817 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
818 	if (status != DMUB_STATUS_OK)
819 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
820 
821 	/* Init DMCU and ABM if available. */
822 	if (dmcu && abm) {
823 		dmcu->funcs->dmcu_init(dmcu);
824 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
825 	}
826 
827 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
828 	if (!adev->dm.dc->ctx->dmub_srv) {
829 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
830 		return -ENOMEM;
831 	}
832 
833 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
834 		 adev->dm.dmcub_fw_version);
835 
836 	return 0;
837 }
838 
839 static int amdgpu_dm_init(struct amdgpu_device *adev)
840 {
841 	struct dc_init_data init_data;
842 #ifdef CONFIG_DRM_AMD_DC_HDCP
843 	struct dc_callback_init init_params;
844 #endif
845 	int r;
846 
847 	adev->dm.ddev = adev->ddev;
848 	adev->dm.adev = adev;
849 
850 	/* Zero all the fields */
851 	memset(&init_data, 0, sizeof(init_data));
852 #ifdef CONFIG_DRM_AMD_DC_HDCP
853 	memset(&init_params, 0, sizeof(init_params));
854 #endif
855 
856 	mutex_init(&adev->dm.dc_lock);
857 	mutex_init(&adev->dm.audio_lock);
858 
859 	if (amdgpu_dm_irq_init(adev)) {
860 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
861 		goto error;
862 	}
863 
864 	init_data.asic_id.chip_family = adev->family;
865 
866 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
867 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
868 
869 	init_data.asic_id.vram_width = adev->gmc.vram_width;
870 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
871 	init_data.asic_id.atombios_base_address =
872 		adev->mode_info.atom_context->bios;
873 
874 	init_data.driver = adev;
875 
876 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
877 
878 	if (!adev->dm.cgs_device) {
879 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
880 		goto error;
881 	}
882 
883 	init_data.cgs_device = adev->dm.cgs_device;
884 
885 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
886 
887 	switch (adev->asic_type) {
888 	case CHIP_CARRIZO:
889 	case CHIP_STONEY:
890 	case CHIP_RAVEN:
891 	case CHIP_RENOIR:
892 		init_data.flags.gpu_vm_support = true;
893 		break;
894 	default:
895 		break;
896 	}
897 
898 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
899 		init_data.flags.fbc_support = true;
900 
901 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
902 		init_data.flags.multi_mon_pp_mclk_switch = true;
903 
904 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
905 		init_data.flags.disable_fractional_pwm = true;
906 
907 	init_data.flags.power_down_display_on_boot = true;
908 
909 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
910 
911 	/* Display Core create. */
912 	adev->dm.dc = dc_create(&init_data);
913 
914 	if (adev->dm.dc) {
915 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
916 	} else {
917 		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
918 		goto error;
919 	}
920 
921 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
922 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
923 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
924 	}
925 
926 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
927 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
928 
929 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
930 		adev->dm.dc->debug.disable_stutter = true;
931 
932 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
933 		adev->dm.dc->debug.disable_dsc = true;
934 
935 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
936 		adev->dm.dc->debug.disable_clock_gate = true;
937 
938 	r = dm_dmub_hw_init(adev);
939 	if (r) {
940 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
941 		goto error;
942 	}
943 
944 	dc_hardware_init(adev->dm.dc);
945 
946 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
947 	if (!adev->dm.freesync_module) {
948 		DRM_ERROR(
949 		"amdgpu: failed to initialize freesync_module.\n");
950 	} else
951 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
952 				adev->dm.freesync_module);
953 
954 	amdgpu_dm_init_color_mod();
955 
956 #ifdef CONFIG_DRM_AMD_DC_HDCP
957 	if (adev->asic_type >= CHIP_RAVEN) {
958 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
959 
960 		if (!adev->dm.hdcp_workqueue)
961 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
962 		else
963 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
964 
965 		dc_init_callbacks(adev->dm.dc, &init_params);
966 	}
967 #endif
968 	if (amdgpu_dm_initialize_drm_device(adev)) {
969 		DRM_ERROR(
970 		"amdgpu: failed to initialize sw for display support.\n");
971 		goto error;
972 	}
973 
974 	/* Update the actual used number of crtc */
975 	/* Update the actual number of CRTCs in use */
976 
977 	/* TODO: Add_display_info? */
978 
979 	/* TODO use dynamic cursor width */
980 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
981 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
982 
983 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
984 		DRM_ERROR(
985 		"amdgpu: failed to initialize vblank support.\n");
986 		goto error;
987 	}
988 
989 	DRM_DEBUG_DRIVER("KMS initialized.\n");
990 
991 	return 0;
992 error:
993 	amdgpu_dm_fini(adev);
994 
995 	return -EINVAL;
996 }
997 
998 static void amdgpu_dm_fini(struct amdgpu_device *adev)
999 {
1000 	amdgpu_dm_audio_fini(adev);
1001 
1002 	amdgpu_dm_destroy_drm_device(&adev->dm);
1003 
1004 #ifdef CONFIG_DRM_AMD_DC_HDCP
1005 	if (adev->dm.hdcp_workqueue) {
1006 		hdcp_destroy(adev->dm.hdcp_workqueue);
1007 		adev->dm.hdcp_workqueue = NULL;
1008 	}
1009 
1010 	if (adev->dm.dc)
1011 		dc_deinit_callbacks(adev->dm.dc);
1012 #endif
1013 	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1014 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1015 		adev->dm.dc->ctx->dmub_srv = NULL;
1016 	}
1017 
1018 	if (adev->dm.dmub_bo)
1019 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1020 				      &adev->dm.dmub_bo_gpu_addr,
1021 				      &adev->dm.dmub_bo_cpu_addr);
1022 
1023 	/* DC Destroy TODO: Replace destroy DAL */
1024 	if (adev->dm.dc)
1025 		dc_destroy(&adev->dm.dc);
1026 	/*
1027 	 * TODO: pageflip, vblank interrupt
1028 	 *
1029 	 * amdgpu_dm_irq_fini(adev);
1030 	 */
1031 
1032 	if (adev->dm.cgs_device) {
1033 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1034 		adev->dm.cgs_device = NULL;
1035 	}
1036 	if (adev->dm.freesync_module) {
1037 		mod_freesync_destroy(adev->dm.freesync_module);
1038 		adev->dm.freesync_module = NULL;
1039 	}
1040 
1041 	mutex_destroy(&adev->dm.audio_lock);
1042 	mutex_destroy(&adev->dm.dc_lock);
1043 
1044 	return;
1045 }
1046 
1047 static int load_dmcu_fw(struct amdgpu_device *adev)
1048 {
1049 	const char *fw_name_dmcu = NULL;
1050 	int r;
1051 	const struct dmcu_firmware_header_v1_0 *hdr;
1052 
1053 	switch (adev->asic_type) {
1054 	case CHIP_BONAIRE:
1055 	case CHIP_HAWAII:
1056 	case CHIP_KAVERI:
1057 	case CHIP_KABINI:
1058 	case CHIP_MULLINS:
1059 	case CHIP_TONGA:
1060 	case CHIP_FIJI:
1061 	case CHIP_CARRIZO:
1062 	case CHIP_STONEY:
1063 	case CHIP_POLARIS11:
1064 	case CHIP_POLARIS10:
1065 	case CHIP_POLARIS12:
1066 	case CHIP_VEGAM:
1067 	case CHIP_VEGA10:
1068 	case CHIP_VEGA12:
1069 	case CHIP_VEGA20:
1070 	case CHIP_NAVI10:
1071 	case CHIP_NAVI14:
1072 	case CHIP_RENOIR:
1073 		return 0;
1074 	case CHIP_NAVI12:
1075 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1076 		break;
1077 	case CHIP_RAVEN:
1078 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1079 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1080 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1081 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1082 		else
1083 			return 0;
1084 		break;
1085 	default:
1086 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1087 		return -EINVAL;
1088 	}
1089 
1090 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1091 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1092 		return 0;
1093 	}
1094 
1095 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1096 	if (r == -ENOENT) {
1097 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1098 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1099 		adev->dm.fw_dmcu = NULL;
1100 		return 0;
1101 	}
1102 	if (r) {
1103 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1104 			fw_name_dmcu);
1105 		return r;
1106 	}
1107 
1108 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1109 	if (r) {
1110 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1111 			fw_name_dmcu);
1112 		release_firmware(adev->dm.fw_dmcu);
1113 		adev->dm.fw_dmcu = NULL;
1114 		return r;
1115 	}
1116 
1117 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1118 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1119 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1120 	adev->firmware.fw_size +=
1121 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1122 
1123 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1124 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1125 	adev->firmware.fw_size +=
1126 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1127 
1128 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1129 
1130 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1131 
1132 	return 0;
1133 }
1134 
1135 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1136 {
1137 	struct amdgpu_device *adev = ctx;
1138 
1139 	return dm_read_reg(adev->dm.dc->ctx, address);
1140 }
1141 
1142 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1143 				     uint32_t value)
1144 {
1145 	struct amdgpu_device *adev = ctx;
1146 
1147 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1148 }
1149 
1150 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1151 {
1152 	struct dmub_srv_create_params create_params;
1153 	struct dmub_srv_region_params region_params;
1154 	struct dmub_srv_region_info region_info;
1155 	struct dmub_srv_fb_params fb_params;
1156 	struct dmub_srv_fb_info *fb_info;
1157 	struct dmub_srv *dmub_srv;
1158 	const struct dmcub_firmware_header_v1_0 *hdr;
1159 	const char *fw_name_dmub;
1160 	enum dmub_asic dmub_asic;
1161 	enum dmub_status status;
1162 	int r;
1163 
1164 	switch (adev->asic_type) {
1165 	case CHIP_RENOIR:
1166 		dmub_asic = DMUB_ASIC_DCN21;
1167 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1168 		break;
1169 
1170 	default:
1171 		/* ASIC doesn't support DMUB. */
1172 		return 0;
1173 	}
1174 
1175 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1176 	if (r) {
1177 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1178 		return 0;
1179 	}
1180 
1181 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1182 	if (r) {
1183 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1184 		return 0;
1185 	}
1186 
1187 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1188 
1189 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1190 
1191 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1192 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1193 			AMDGPU_UCODE_ID_DMCUB;
1194 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1195 			adev->dm.dmub_fw;
1196 		adev->firmware.fw_size +=
1197 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1198 
1199 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1200 			 adev->dm.dmcub_fw_version);
1201 	}
1202 
1203 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1204 	dmub_srv = adev->dm.dmub_srv;
1205 
1206 	if (!dmub_srv) {
1207 		DRM_ERROR("Failed to allocate DMUB service!\n");
1208 		return -ENOMEM;
1209 	}
1210 
1211 	memset(&create_params, 0, sizeof(create_params));
1212 	create_params.user_ctx = adev;
1213 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1214 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1215 	create_params.asic = dmub_asic;
1216 
1217 	/* Create the DMUB service. */
1218 	status = dmub_srv_create(dmub_srv, &create_params);
1219 	if (status != DMUB_STATUS_OK) {
1220 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1221 		return -EINVAL;
1222 	}
1223 
1224 	/* Calculate the size of all the regions for the DMUB service. */
1225 	memset(&region_params, 0, sizeof(region_params));
1226 
1227 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1228 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1229 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1230 	region_params.vbios_size = adev->bios_size;
1231 	region_params.fw_bss_data =
1232 		adev->dm.dmub_fw->data +
1233 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1234 		le32_to_cpu(hdr->inst_const_bytes);
1235 	region_params.fw_inst_const =
1236 		adev->dm.dmub_fw->data +
1237 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1238 		PSP_HEADER_BYTES;
1239 
1240 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1241 					   &region_info);
1242 
1243 	if (status != DMUB_STATUS_OK) {
1244 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1245 		return -EINVAL;
1246 	}
1247 
1248 	/*
1249 	 * Allocate a framebuffer based on the total size of all the regions.
1250 	 * TODO: Move this into GART.
1251 	 */
1252 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1253 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1254 				    &adev->dm.dmub_bo_gpu_addr,
1255 				    &adev->dm.dmub_bo_cpu_addr);
1256 	if (r)
1257 		return r;
1258 
1259 	/* Rebase the regions on the framebuffer address. */
1260 	memset(&fb_params, 0, sizeof(fb_params));
1261 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1262 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1263 	fb_params.region_info = &region_info;
1264 
1265 	adev->dm.dmub_fb_info =
1266 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1267 	fb_info = adev->dm.dmub_fb_info;
1268 
1269 	if (!fb_info) {
1270 		DRM_ERROR(
1271 			"Failed to allocate framebuffer info for DMUB service!\n");
1272 		return -ENOMEM;
1273 	}
1274 
1275 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1276 	if (status != DMUB_STATUS_OK) {
1277 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1278 		return -EINVAL;
1279 	}
1280 
1281 	return 0;
1282 }
1283 
1284 static int dm_sw_init(void *handle)
1285 {
1286 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1287 	int r;
1288 
1289 	r = dm_dmub_sw_init(adev);
1290 	if (r)
1291 		return r;
1292 
1293 	return load_dmcu_fw(adev);
1294 }
1295 
1296 static int dm_sw_fini(void *handle)
1297 {
1298 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1299 
1300 	kfree(adev->dm.dmub_fb_info);
1301 	adev->dm.dmub_fb_info = NULL;
1302 
1303 	if (adev->dm.dmub_srv) {
1304 		dmub_srv_destroy(adev->dm.dmub_srv);
1305 		adev->dm.dmub_srv = NULL;
1306 	}
1307 
1308 	if (adev->dm.dmub_fw) {
1309 		release_firmware(adev->dm.dmub_fw);
1310 		adev->dm.dmub_fw = NULL;
1311 	}
1312 
1313 	if (adev->dm.fw_dmcu) {
1314 		release_firmware(adev->dm.fw_dmcu);
1315 		adev->dm.fw_dmcu = NULL;
1316 	}
1317 
1318 	return 0;
1319 }
1320 
1321 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1322 {
1323 	struct amdgpu_dm_connector *aconnector;
1324 	struct drm_connector *connector;
1325 	struct drm_connector_list_iter iter;
1326 	int ret = 0;
1327 
1328 	drm_connector_list_iter_begin(dev, &iter);
1329 	drm_for_each_connector_iter(connector, &iter) {
1330 		aconnector = to_amdgpu_dm_connector(connector);
1331 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1332 		    aconnector->mst_mgr.aux) {
1333 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1334 					 aconnector,
1335 					 aconnector->base.base.id);
1336 
1337 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1338 			if (ret < 0) {
1339 				DRM_ERROR("DM_MST: Failed to start MST\n");
1340 				aconnector->dc_link->type =
1341 					dc_connection_single;
1342 				break;
1343 			}
1344 		}
1345 	}
1346 	drm_connector_list_iter_end(&iter);
1347 
1348 	return ret;
1349 }
1350 
1351 static int dm_late_init(void *handle)
1352 {
1353 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1354 
1355 	struct dmcu_iram_parameters params;
1356 	unsigned int linear_lut[16];
1357 	int i;
1358 	struct dmcu *dmcu = NULL;
1359 	bool ret = false;
1360 
1361 	if (!adev->dm.fw_dmcu)
1362 		return detect_mst_link_for_all_connectors(adev->ddev);
1363 
1364 	dmcu = adev->dm.dc->res_pool->dmcu;
1365 
1366 	for (i = 0; i < 16; i++)
1367 		linear_lut[i] = 0xFFFF * i / 15;
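	/* e.g. linear_lut = { 0x0000, 0x1111, 0x2222, ..., 0xEEEE, 0xFFFF } */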
1368 
1369 	params.set = 0;
1370 	params.backlight_ramping_start = 0xCCCC;
1371 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1372 	params.backlight_lut_array_size = 16;
1373 	params.backlight_lut_array = linear_lut;
1374 
1375 	/* Min backlight level after ABM reduction; don't allow below 1%:
1376 	 * 0xFFFF * 0.01 = 0x28F
1377 	 */
1378 	params.min_abm_backlight = 0x28F;
1379 
1380 	/* todo will enable for navi10 */
1381 	if (adev->asic_type <= CHIP_RAVEN) {
1382 		ret = dmcu_load_iram(dmcu, params);
1383 
1384 		if (!ret)
1385 			return -EINVAL;
1386 	}
1387 
1388 	return detect_mst_link_for_all_connectors(adev->ddev);
1389 }
1390 
1391 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1392 {
1393 	struct amdgpu_dm_connector *aconnector;
1394 	struct drm_connector *connector;
1395 	struct drm_connector_list_iter iter;
1396 	struct drm_dp_mst_topology_mgr *mgr;
1397 	int ret;
1398 	bool need_hotplug = false;
1399 
1400 	drm_connector_list_iter_begin(dev, &iter);
1401 	drm_for_each_connector_iter(connector, &iter) {
1402 		aconnector = to_amdgpu_dm_connector(connector);
1403 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1404 		    aconnector->mst_port)
1405 			continue;
1406 
1407 		mgr = &aconnector->mst_mgr;
1408 
1409 		if (suspend) {
1410 			drm_dp_mst_topology_mgr_suspend(mgr);
1411 		} else {
1412 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1413 			if (ret < 0) {
1414 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1415 				need_hotplug = true;
1416 			}
1417 		}
1418 	}
1419 	drm_connector_list_iter_end(&iter);
1420 
1421 	if (need_hotplug)
1422 		drm_kms_helper_hotplug_event(dev);
1423 }
1424 
1425 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1426 {
1427 	struct smu_context *smu = &adev->smu;
1428 	int ret = 0;
1429 
1430 	if (!is_support_sw_smu(adev))
1431 		return 0;
1432 
1433 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1434 	 * depends on the Windows driver dc implementation.
1435 	 * For Navi1x, the clock settings of the dcn watermarks are fixed. The
1436 	 * settings should be passed to smu during boot up and resume from s3.
1437 	 * boot up: dc calculates the dcn watermark clock settings within
1438 	 * dc_create, dcn20_resource_construct, and
1439 	 * then calls the pplib functions below to pass the settings to smu:
1440 	 * smu_set_watermarks_for_clock_ranges
1441 	 * smu_set_watermarks_table
1442 	 * navi10_set_watermarks_table
1443 	 * smu_write_watermarks_table
1444 	 *
1445 	 * For Renoir, the clock settings of the dcn watermarks are also fixed
1446 	 * values. dc has implemented a different flow for the Windows driver:
1447 	 * dc_hardware_init / dc_set_power_state
1448 	 * dcn10_init_hw
1449 	 * notify_wm_ranges
1450 	 * set_wm_ranges
1451 	 * -- Linux
1452 	 * smu_set_watermarks_for_clock_ranges
1453 	 * renoir_set_watermarks_table
1454 	 * smu_write_watermarks_table
1455 	 *
1456 	 * For Linux,
1457 	 * dc_hardware_init -> amdgpu_dm_init
1458 	 * dc_set_power_state --> dm_resume
1459 	 *
1460 	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
1461 	 *
1462 	 */
1463 	switch (adev->asic_type) {
1464 	case CHIP_NAVI10:
1465 	case CHIP_NAVI14:
1466 	case CHIP_NAVI12:
1467 		break;
1468 	default:
1469 		return 0;
1470 	}
1471 
1472 	mutex_lock(&smu->mutex);
1473 
1474 	/* pass data to smu controller */
1475 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1476 			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1477 		ret = smu_write_watermarks_table(smu);
1478 
1479 		if (ret) {
1480 			mutex_unlock(&smu->mutex);
1481 			DRM_ERROR("Failed to update WMTABLE!\n");
1482 			return ret;
1483 		}
1484 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1485 	}
1486 
1487 	mutex_unlock(&smu->mutex);
1488 
1489 	return 0;
1490 }
1491 
1492 /**
1493  * dm_hw_init() - Initialize DC device
1494  * @handle: The base driver device containing the amdgpu_dm device.
1495  *
1496  * Initialize the &struct amdgpu_display_manager device. This involves calling
1497  * the initializers of each DM component, then populating the struct with them.
1498  *
1499  * Although the function implies hardware initialization, both hardware and
1500  * software are initialized here. Splitting them out to their relevant init
1501  * hooks is a future TODO item.
1502  *
1503  * Some notable things that are initialized here:
1504  *
1505  * - Display Core, both software and hardware
1506  * - DC modules that we need (freesync and color management)
1507  * - DRM software states
1508  * - Interrupt sources and handlers
1509  * - Vblank support
1510  * - Debug FS entries, if enabled
1511  */
1512 static int dm_hw_init(void *handle)
1513 {
1514 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1515 	/* Create DAL display manager */
1516 	amdgpu_dm_init(adev);
1517 	amdgpu_dm_hpd_init(adev);
1518 
1519 	return 0;
1520 }
1521 
1522 /**
1523  * dm_hw_fini() - Teardown DC device
1524  * @handle: The base driver device containing the amdgpu_dm device.
1525  *
1526  * Teardown components within &struct amdgpu_display_manager that require
1527  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1528  * were loaded. Also flush IRQ workqueues and disable them.
1529  */
1530 static int dm_hw_fini(void *handle)
1531 {
1532 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1533 
1534 	amdgpu_dm_hpd_fini(adev);
1535 
1536 	amdgpu_dm_irq_fini(adev);
1537 	amdgpu_dm_fini(adev);
1538 	return 0;
1539 }
1540 
1541 
1542 static int dm_enable_vblank(struct drm_crtc *crtc);
1543 static void dm_disable_vblank(struct drm_crtc *crtc);
1544 
1545 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1546 				 struct dc_state *state, bool enable)
1547 {
1548 	enum dc_irq_source irq_source;
1549 	struct amdgpu_crtc *acrtc;
1550 	int rc = -EBUSY;
1551 	int i = 0;
1552 
1553 	for (i = 0; i < state->stream_count; i++) {
1554 		acrtc = get_crtc_by_otg_inst(
1555 				adev, state->stream_status[i].primary_otg_inst);
1556 
1557 		if (acrtc && state->stream_status[i].plane_count != 0) {
1558 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1559 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1560 			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1561 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1562 			if (rc)
1563 				DRM_WARN("Failed to %s pflip interrupts\n",
1564 					 enable ? "enable" : "disable");
1565 
1566 			if (enable) {
1567 				rc = dm_enable_vblank(&acrtc->base);
1568 				if (rc)
1569 					DRM_WARN("Failed to enable vblank interrupts\n");
1570 			} else {
1571 				dm_disable_vblank(&acrtc->base);
1572 			}
1573 
1574 		}
1575 	}
1576 
1577 }
1578 
1579 enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1580 {
1581 	struct dc_state *context = NULL;
1582 	enum dc_status res = DC_ERROR_UNEXPECTED;
1583 	int i;
1584 	struct dc_stream_state *del_streams[MAX_PIPES];
1585 	int del_streams_count = 0;
1586 
1587 	memset(del_streams, 0, sizeof(del_streams));
1588 
1589 	context = dc_create_state(dc);
1590 	if (context == NULL)
1591 		goto context_alloc_fail;
1592 
1593 	dc_resource_state_copy_construct_current(dc, context);
1594 
1595 	/* First remove from context all streams */
1596 	for (i = 0; i < context->stream_count; i++) {
1597 		struct dc_stream_state *stream = context->streams[i];
1598 
1599 		del_streams[del_streams_count++] = stream;
1600 	}
1601 
1602 	/* Remove all planes for removed streams and then remove the streams */
1603 	for (i = 0; i < del_streams_count; i++) {
1604 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1605 			res = DC_FAIL_DETACH_SURFACES;
1606 			goto fail;
1607 		}
1608 
1609 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1610 		if (res != DC_OK)
1611 			goto fail;
1612 	}
1613 
1614 
1615 	res = dc_validate_global_state(dc, context, false);
1616 
1617 	if (res != DC_OK) {
1618 		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1619 		goto fail;
1620 	}
1621 
1622 	res = dc_commit_state(dc, context);
1623 
1624 fail:
1625 	dc_release_state(context);
1626 
1627 context_alloc_fail:
1628 	return res;
1629 }
1630 
1631 static int dm_suspend(void *handle)
1632 {
1633 	struct amdgpu_device *adev = handle;
1634 	struct amdgpu_display_manager *dm = &adev->dm;
1635 	int ret = 0;
1636 
1637 	if (adev->in_gpu_reset) {
1638 		mutex_lock(&dm->dc_lock);
1639 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1640 
1641 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1642 
1643 		amdgpu_dm_commit_zero_streams(dm->dc);
1644 
1645 		amdgpu_dm_irq_suspend(adev);
1646 
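		/* Note: dm->dc_lock intentionally stays held here; the
		 * gpu-reset branch of dm_resume() releases it after the
		 * cached state has been restored.
		 */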
1647 		return ret;
1648 	}
1649 
1650 	WARN_ON(adev->dm.cached_state);
1651 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1652 
1653 	s3_handle_mst(adev->ddev, true);
1654 
1655 	amdgpu_dm_irq_suspend(adev);
1656 
1657 
1658 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1659 
1660 	return 0;
1661 }
1662 
1663 static struct amdgpu_dm_connector *
1664 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1665 					     struct drm_crtc *crtc)
1666 {
1667 	uint32_t i;
1668 	struct drm_connector_state *new_con_state;
1669 	struct drm_connector *connector;
1670 	struct drm_crtc *crtc_from_state;
1671 
1672 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1673 		crtc_from_state = new_con_state->crtc;
1674 
1675 		if (crtc_from_state == crtc)
1676 			return to_amdgpu_dm_connector(connector);
1677 	}
1678 
1679 	return NULL;
1680 }
1681 
1682 static void emulated_link_detect(struct dc_link *link)
1683 {
1684 	struct dc_sink_init_data sink_init_data = { 0 };
1685 	struct display_sink_capability sink_caps = { 0 };
1686 	enum dc_edid_status edid_status;
1687 	struct dc_context *dc_ctx = link->ctx;
1688 	struct dc_sink *sink = NULL;
1689 	struct dc_sink *prev_sink = NULL;
1690 
1691 	link->type = dc_connection_none;
1692 	prev_sink = link->local_sink;
1693 
1694 	if (prev_sink != NULL)
1695 		dc_sink_retain(prev_sink);
1696 
1697 	switch (link->connector_signal) {
1698 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1699 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1700 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1701 		break;
1702 	}
1703 
1704 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1705 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1706 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1707 		break;
1708 	}
1709 
1710 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1711 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1712 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1713 		break;
1714 	}
1715 
1716 	case SIGNAL_TYPE_LVDS: {
1717 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1718 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1719 		break;
1720 	}
1721 
1722 	case SIGNAL_TYPE_EDP: {
1723 		sink_caps.transaction_type =
1724 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1725 		sink_caps.signal = SIGNAL_TYPE_EDP;
1726 		break;
1727 	}
1728 
1729 	case SIGNAL_TYPE_DISPLAY_PORT: {
1730 		sink_caps.transaction_type =
1731 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1732 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1733 		break;
1734 	}
1735 
1736 	default:
1737 		DC_ERROR("Invalid connector type! signal:%d\n",
1738 			link->connector_signal);
1739 		return;
1740 	}
1741 
1742 	sink_init_data.link = link;
1743 	sink_init_data.sink_signal = sink_caps.signal;
1744 
1745 	sink = dc_sink_create(&sink_init_data);
1746 	if (!sink) {
1747 		DC_ERROR("Failed to create sink!\n");
1748 		return;
1749 	}
1750 
1751 	/* dc_sink_create returns a new reference */
1752 	link->local_sink = sink;
1753 
1754 	edid_status = dm_helpers_read_local_edid(
1755 			link->ctx,
1756 			link,
1757 			sink);
1758 
1759 	if (edid_status != EDID_OK)
1760 		DC_ERROR("Failed to read EDID\n");
1761 
1762 }
1763 
1764 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1765 				     struct amdgpu_display_manager *dm)
1766 {
1767 	struct {
1768 		struct dc_surface_update surface_updates[MAX_SURFACES];
1769 		struct dc_plane_info plane_infos[MAX_SURFACES];
1770 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1771 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1772 		struct dc_stream_update stream_update;
1773 	} *bundle;
1774 	int k, m;
1775 
1776 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1777 
1778 	if (!bundle) {
1779 		dm_error("Failed to allocate update bundle\n");
1780 		goto cleanup;
1781 	}
1782 
1783 	for (k = 0; k < dc_state->stream_count; k++) {
1784 		bundle->stream_update.stream = dc_state->streams[k];
1785 
1786 		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1787 			bundle->surface_updates[m].surface =
1788 				dc_state->stream_status[k].plane_states[m];
1789 			bundle->surface_updates[m].surface->force_full_update =
1790 				true;
1791 		}
1792 		dc_commit_updates_for_stream(
1793 			dm->dc, bundle->surface_updates,
1794 			dc_state->stream_status[k].plane_count,
1795 			dc_state->streams[k], &bundle->stream_update, dc_state);
1796 	}
1797 
1798 cleanup:
1799 	kfree(bundle);
1802 }
1803 
1804 static int dm_resume(void *handle)
1805 {
1806 	struct amdgpu_device *adev = handle;
1807 	struct drm_device *ddev = adev->ddev;
1808 	struct amdgpu_display_manager *dm = &adev->dm;
1809 	struct amdgpu_dm_connector *aconnector;
1810 	struct drm_connector *connector;
1811 	struct drm_connector_list_iter iter;
1812 	struct drm_crtc *crtc;
1813 	struct drm_crtc_state *new_crtc_state;
1814 	struct dm_crtc_state *dm_new_crtc_state;
1815 	struct drm_plane *plane;
1816 	struct drm_plane_state *new_plane_state;
1817 	struct dm_plane_state *dm_new_plane_state;
1818 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1819 	enum dc_connection_type new_connection_type = dc_connection_none;
1820 	struct dc_state *dc_state;
1821 	int i, r, j;
1822 
1823 	if (adev->in_gpu_reset) {
1824 		dc_state = dm->cached_dc_state;
1825 
1826 		r = dm_dmub_hw_init(adev);
1827 		if (r)
1828 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1829 
1830 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1831 		dc_resume(dm->dc);
1832 
1833 		amdgpu_dm_irq_resume_early(adev);
1834 
1835 		for (i = 0; i < dc_state->stream_count; i++) {
1836 			dc_state->streams[i]->mode_changed = true;
1837 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1838 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
1839 					= 0xffffffff;
1840 			}
1841 		}
1842 
1843 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1844 
1845 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1846 
1847 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1848 
1849 		dc_release_state(dm->cached_dc_state);
1850 		dm->cached_dc_state = NULL;
1851 
1852 		amdgpu_dm_irq_resume_late(adev);
1853 
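		/* dm->dc_lock was taken in dm_suspend() on the GPU-reset path. */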
1854 		mutex_unlock(&dm->dc_lock);
1855 
1856 		return 0;
1857 	}
1858 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1859 	dc_release_state(dm_state->context);
1860 	dm_state->context = dc_create_state(dm->dc);
1861 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1862 	dc_resource_state_construct(dm->dc, dm_state->context);
1863 
1864 	/* Before powering on DC we need to re-initialize DMUB. */
1865 	r = dm_dmub_hw_init(adev);
1866 	if (r)
1867 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1868 
1869 	/* power on hardware */
1870 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1871 
1872 	/* program HPD filter */
1873 	dc_resume(dm->dc);
1874 
1875 	/*
1876 	 * early enable HPD Rx IRQ, should be done before set mode as short
1877 	 * pulse interrupts are used for MST
1878 	 */
1879 	amdgpu_dm_irq_resume_early(adev);
1880 
1881 	/* On resume we need to rewrite the MSTM control bits to enable MST */
1882 	s3_handle_mst(ddev, false);
1883 
1884 	/* Do detection */
1885 	drm_connector_list_iter_begin(ddev, &iter);
1886 	drm_for_each_connector_iter(connector, &iter) {
1887 		aconnector = to_amdgpu_dm_connector(connector);
1888 
1889 		/*
1890 		 * This is the case when traversing through already-created
1891 		 * MST connectors; they should be skipped.
1892 		 */
1893 		if (aconnector->mst_port)
1894 			continue;
1895 
1896 		mutex_lock(&aconnector->hpd_lock);
1897 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1898 			DRM_ERROR("KMS: Failed to detect connector\n");
1899 
1900 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1901 			emulated_link_detect(aconnector->dc_link);
1902 		else
1903 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1904 
1905 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1906 			aconnector->fake_enable = false;
1907 
1908 		if (aconnector->dc_sink)
1909 			dc_sink_release(aconnector->dc_sink);
1910 		aconnector->dc_sink = NULL;
1911 		amdgpu_dm_update_connector_after_detect(aconnector);
1912 		mutex_unlock(&aconnector->hpd_lock);
1913 	}
1914 	drm_connector_list_iter_end(&iter);
1915 
1916 	/* Force mode set in atomic commit */
1917 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1918 		new_crtc_state->active_changed = true;
1919 
1920 	/*
1921 	 * atomic_check is expected to create the dc states. We need to release
1922 	 * them here, since they were duplicated as part of the suspend
1923 	 * procedure.
1924 	 */
1925 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1926 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1927 		if (dm_new_crtc_state->stream) {
1928 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1929 			dc_stream_release(dm_new_crtc_state->stream);
1930 			dm_new_crtc_state->stream = NULL;
1931 		}
1932 	}
1933 
1934 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1935 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1936 		if (dm_new_plane_state->dc_state) {
1937 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1938 			dc_plane_state_release(dm_new_plane_state->dc_state);
1939 			dm_new_plane_state->dc_state = NULL;
1940 		}
1941 	}
1942 
1943 	drm_atomic_helper_resume(ddev, dm->cached_state);
1944 
1945 	dm->cached_state = NULL;
1946 
1947 	amdgpu_dm_irq_resume_late(adev);
1948 
1949 	amdgpu_dm_smu_write_watermarks_table(adev);
1950 
1951 	return 0;
1952 }
1953 
1954 /**
1955  * DOC: DM Lifecycle
1956  *
1957  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1958  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1959  * the base driver's device list to be initialized and torn down accordingly.
1960  *
1961  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1962  */
1963 
1964 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1965 	.name = "dm",
1966 	.early_init = dm_early_init,
1967 	.late_init = dm_late_init,
1968 	.sw_init = dm_sw_init,
1969 	.sw_fini = dm_sw_fini,
1970 	.hw_init = dm_hw_init,
1971 	.hw_fini = dm_hw_fini,
1972 	.suspend = dm_suspend,
1973 	.resume = dm_resume,
1974 	.is_idle = dm_is_idle,
1975 	.wait_for_idle = dm_wait_for_idle,
1976 	.check_soft_reset = dm_check_soft_reset,
1977 	.soft_reset = dm_soft_reset,
1978 	.set_clockgating_state = dm_set_clockgating_state,
1979 	.set_powergating_state = dm_set_powergating_state,
1980 };
1981 
1982 const struct amdgpu_ip_block_version dm_ip_block =
1983 {
1984 	.type = AMD_IP_BLOCK_TYPE_DCE,
1985 	.major = 1,
1986 	.minor = 0,
1987 	.rev = 0,
1988 	.funcs = &amdgpu_dm_funcs,
1989 };
1990 
1992 /**
1993  * DOC: atomic
1994  *
1995  * *WIP*
1996  */
1997 
1998 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1999 	.fb_create = amdgpu_display_user_framebuffer_create,
2000 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2001 	.atomic_check = amdgpu_dm_atomic_check,
2002 	.atomic_commit = amdgpu_dm_atomic_commit,
2003 };
2004 
2005 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2006 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2007 };
2008 
2009 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2010 {
2011 	u32 max_cll, min_cll, max, min, q, r;
2012 	struct amdgpu_dm_backlight_caps *caps;
2013 	struct amdgpu_display_manager *dm;
2014 	struct drm_connector *conn_base;
2015 	struct amdgpu_device *adev;
2016 	static const u8 pre_computed_values[] = {
2017 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2018 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2019 
2020 	if (!aconnector || !aconnector->dc_link)
2021 		return;
2022 
2023 	conn_base = &aconnector->base;
2024 	adev = conn_base->dev->dev_private;
2025 	dm = &adev->dm;
2026 	caps = &dm->backlight_caps;
2027 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2028 	caps->aux_support = false;
2029 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2030 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2031 
2032 	if (caps->ext_caps->bits.oled == 1 ||
2033 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2034 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2035 		caps->aux_support = true;
2036 
2037 	/* From the specification (CTA-861-G), for calculating the maximum
2038 	 * luminance we need to use:
2039 	 *	Luminance = 50*2**(CV/32)
2040 	 * Where CV is a one-byte value.
2041 	 * Calculating this expression would need floating-point precision;
2042 	 * to avoid that complexity, we take advantage of the fact that CV is
2043 	 * divided by a constant. From Euclid's division algorithm, we know
2044 	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in
2045 	 * the Luminance expression and get 50*(2**q)*(2**(r/32)), hence we
2046 	 * just need to pre-compute the values of 50*2**(r/32) for r in 0..31.
2047 	 * For pre-computing the values we used the following Ruby line:
2048 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2049 	 * The results of the above expression can be verified against
2050 	 * pre_computed_values.
2051 	 */
2052 	q = max_cll >> 5;
2053 	r = max_cll % 32;
2054 	max = (1 << q) * pre_computed_values[r];
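	/*
	 * Worked example (sketch): max_cll = 70 gives q = 2, r = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which
	 * matches 50*2**(70/32.0) ~= 227.9.
	 */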
2055 
2056 	/* min luminance: maxLum * (CV/255)^2 / 100 */
2057 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2058 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2059 
2060 	caps->aux_max_input_signal = max;
2061 	caps->aux_min_input_signal = min;
2062 }
2063 
2064 void amdgpu_dm_update_connector_after_detect(
2065 		struct amdgpu_dm_connector *aconnector)
2066 {
2067 	struct drm_connector *connector = &aconnector->base;
2068 	struct drm_device *dev = connector->dev;
2069 	struct dc_sink *sink;
2070 
2071 	/* MST handled by drm_mst framework */
2072 	if (aconnector->mst_mgr.mst_state)
2073 		return;
2074 
2076 	sink = aconnector->dc_link->local_sink;
2077 	if (sink)
2078 		dc_sink_retain(sink);
2079 
2080 	/*
2081 	 * EDID-managed connectors get their first update only in the mode_valid
2082 	 * hook; the sink is then set to fake or physical depending on link status.
2083 	 * Skip if already done during boot.
2084 	 */
2085 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2086 			&& aconnector->dc_em_sink) {
2087 
2088 		/*
2089 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
2090 		 * fake the stream, because connector->sink is set to NULL on resume
2091 		 */
2092 		mutex_lock(&dev->mode_config.mutex);
2093 
2094 		if (sink) {
2095 			if (aconnector->dc_sink) {
2096 				amdgpu_dm_update_freesync_caps(connector, NULL);
2097 				/*
2098 				 * The retain and release below bump the sink's
2099 				 * refcount, because the link no longer points to it
2100 				 * after disconnect; otherwise the next crtc-to-connector
2101 				 * reshuffle by the UMD triggers an unwanted dc_sink release.
2102 				 */
2103 				dc_sink_release(aconnector->dc_sink);
2104 			}
2105 			aconnector->dc_sink = sink;
2106 			dc_sink_retain(aconnector->dc_sink);
2107 			amdgpu_dm_update_freesync_caps(connector,
2108 					aconnector->edid);
2109 		} else {
2110 			amdgpu_dm_update_freesync_caps(connector, NULL);
2111 			if (!aconnector->dc_sink) {
2112 				aconnector->dc_sink = aconnector->dc_em_sink;
2113 				dc_sink_retain(aconnector->dc_sink);
2114 			}
2115 		}
2116 
2117 		mutex_unlock(&dev->mode_config.mutex);
2118 
2119 		if (sink)
2120 			dc_sink_release(sink);
2121 		return;
2122 	}
2123 
2124 	/*
2125 	 * TODO: temporary guard until a proper fix is found.
2126 	 * If this sink is an MST sink, we should not do anything.
2127 	 */
2128 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2129 		dc_sink_release(sink);
2130 		return;
2131 	}
2132 
2133 	if (aconnector->dc_sink == sink) {
2134 		/*
2135 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2136 		 * Do nothing!!
2137 		 */
2138 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2139 				aconnector->connector_id);
2140 		if (sink)
2141 			dc_sink_release(sink);
2142 		return;
2143 	}
2144 
2145 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2146 		aconnector->connector_id, aconnector->dc_sink, sink);
2147 
2148 	mutex_lock(&dev->mode_config.mutex);
2149 
2150 	/*
2151 	 * 1. Update status of the drm connector
2152 	 * 2. Send an event and let userspace tell us what to do
2153 	 */
2154 	if (sink) {
2155 		/*
2156 		 * TODO: check if we still need the S3 mode update workaround.
2157 		 * If yes, put it here.
2158 		 */
2159 		if (aconnector->dc_sink)
2160 			amdgpu_dm_update_freesync_caps(connector, NULL);
2161 
2162 		aconnector->dc_sink = sink;
2163 		dc_sink_retain(aconnector->dc_sink);
2164 		if (sink->dc_edid.length == 0) {
2165 			aconnector->edid = NULL;
2166 			if (aconnector->dc_link->aux_mode) {
2167 				drm_dp_cec_unset_edid(
2168 					&aconnector->dm_dp_aux.aux);
2169 			}
2170 		} else {
2171 			aconnector->edid =
2172 				(struct edid *)sink->dc_edid.raw_edid;
2173 
2174 			drm_connector_update_edid_property(connector,
2175 							   aconnector->edid);
2176 
2177 			if (aconnector->dc_link->aux_mode)
2178 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2179 						    aconnector->edid);
2180 		}
2181 
2182 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2183 		update_connector_ext_caps(aconnector);
2184 	} else {
2185 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2186 		amdgpu_dm_update_freesync_caps(connector, NULL);
2187 		drm_connector_update_edid_property(connector, NULL);
2188 		aconnector->num_modes = 0;
2189 		dc_sink_release(aconnector->dc_sink);
2190 		aconnector->dc_sink = NULL;
2191 		aconnector->edid = NULL;
2192 #ifdef CONFIG_DRM_AMD_DC_HDCP
2193 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2194 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2195 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2196 #endif
2197 	}
2198 
2199 	mutex_unlock(&dev->mode_config.mutex);
2200 
2201 	if (sink)
2202 		dc_sink_release(sink);
2203 }
2204 
2205 static void handle_hpd_irq(void *param)
2206 {
2207 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2208 	struct drm_connector *connector = &aconnector->base;
2209 	struct drm_device *dev = connector->dev;
2210 	enum dc_connection_type new_connection_type = dc_connection_none;
2211 #ifdef CONFIG_DRM_AMD_DC_HDCP
2212 	struct amdgpu_device *adev = dev->dev_private;
2213 #endif
2214 
2215 	/*
2216 	 * In case of failure, or for MST, there is no need to update the connector
2217 	 * status or notify the OS, since (for MST) this is done in its own context.
2218 	 */
2219 	mutex_lock(&aconnector->hpd_lock);
2220 
2221 #ifdef CONFIG_DRM_AMD_DC_HDCP
2222 	if (adev->dm.hdcp_workqueue)
2223 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2224 #endif
2225 	if (aconnector->fake_enable)
2226 		aconnector->fake_enable = false;
2227 
2228 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2229 		DRM_ERROR("KMS: Failed to detect connector\n");
2230 
2231 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2232 		emulated_link_detect(aconnector->dc_link);
2233 
2235 		drm_modeset_lock_all(dev);
2236 		dm_restore_drm_connector_state(dev, connector);
2237 		drm_modeset_unlock_all(dev);
2238 
2239 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2240 			drm_kms_helper_hotplug_event(dev);
2241 
2242 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2243 		amdgpu_dm_update_connector_after_detect(aconnector);
2244 
2246 		drm_modeset_lock_all(dev);
2247 		dm_restore_drm_connector_state(dev, connector);
2248 		drm_modeset_unlock_all(dev);
2249 
2250 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2251 			drm_kms_helper_hotplug_event(dev);
2252 	}
2253 	mutex_unlock(&aconnector->hpd_lock);
2255 }
2256 
2257 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2258 {
2259 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2260 	uint8_t dret;
2261 	bool new_irq_handled = false;
2262 	int dpcd_addr;
2263 	int dpcd_bytes_to_read;
2264 
2265 	const int max_process_count = 30;
2266 	int process_count = 0;
2267 
2268 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2269 
2270 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2271 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2272 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2273 		dpcd_addr = DP_SINK_COUNT;
2274 	} else {
2275 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2276 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2277 		dpcd_addr = DP_SINK_COUNT_ESI;
2278 	}
2279 
2280 	dret = drm_dp_dpcd_read(
2281 		&aconnector->dm_dp_aux.aux,
2282 		dpcd_addr,
2283 		esi,
2284 		dpcd_bytes_to_read);
2285 
2286 	while (dret == dpcd_bytes_to_read &&
2287 		process_count < max_process_count) {
2288 		uint8_t retry;
2289 		dret = 0;
2290 
2291 		process_count++;
2292 
2293 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2294 		/* handle HPD short pulse irq */
2295 		if (aconnector->mst_mgr.mst_state)
2296 			drm_dp_mst_hpd_irq(
2297 				&aconnector->mst_mgr,
2298 				esi,
2299 				&new_irq_handled);
2300 
2301 		if (new_irq_handled) {
2302 			/* ACK at DPCD to notify the downstream sink */
2303 			const int ack_dpcd_bytes_to_write =
2304 				dpcd_bytes_to_read - 1;
2305 
2306 			for (retry = 0; retry < 3; retry++) {
2307 				uint8_t wret;
2308 
2309 				wret = drm_dp_dpcd_write(
2310 					&aconnector->dm_dp_aux.aux,
2311 					dpcd_addr + 1,
2312 					&esi[1],
2313 					ack_dpcd_bytes_to_write);
2314 				if (wret == ack_dpcd_bytes_to_write)
2315 					break;
2316 			}
2317 
2318 			/* check if there is new irq to be handled */
2319 			dret = drm_dp_dpcd_read(
2320 				&aconnector->dm_dp_aux.aux,
2321 				dpcd_addr,
2322 				esi,
2323 				dpcd_bytes_to_read);
2324 
2325 			new_irq_handled = false;
2326 		} else {
2327 			break;
2328 		}
2329 	}
2330 
2331 	if (process_count == max_process_count)
2332 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2333 }
2334 
2335 static void handle_hpd_rx_irq(void *param)
2336 {
2337 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2338 	struct drm_connector *connector = &aconnector->base;
2339 	struct drm_device *dev = connector->dev;
2340 	struct dc_link *dc_link = aconnector->dc_link;
2341 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2342 	enum dc_connection_type new_connection_type = dc_connection_none;
2343 #ifdef CONFIG_DRM_AMD_DC_HDCP
2344 	union hpd_irq_data hpd_irq_data;
2345 	struct amdgpu_device *adev = dev->dev_private;
2346 
2347 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2348 #endif
2349 
2350 	/*
2351 	 * TODO: Temporarily use a mutex to protect the HPD interrupt from GPIO
2352 	 * conflicts; once an i2c helper is implemented, this mutex should be
2353 	 * retired.
2354 	 */
2355 	if (dc_link->type != dc_connection_mst_branch)
2356 		mutex_lock(&aconnector->hpd_lock);
2357 
2359 #ifdef CONFIG_DRM_AMD_DC_HDCP
2360 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2361 #else
2362 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2363 #endif
2364 			!is_mst_root_connector) {
2365 		/* Downstream Port status changed. */
2366 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2367 			DRM_ERROR("KMS: Failed to detect connector\n");
2368 
2369 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2370 			emulated_link_detect(dc_link);
2371 
2372 			if (aconnector->fake_enable)
2373 				aconnector->fake_enable = false;
2374 
2375 			amdgpu_dm_update_connector_after_detect(aconnector);
2376 
2378 			drm_modeset_lock_all(dev);
2379 			dm_restore_drm_connector_state(dev, connector);
2380 			drm_modeset_unlock_all(dev);
2381 
2382 			drm_kms_helper_hotplug_event(dev);
2383 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2384 
2385 			if (aconnector->fake_enable)
2386 				aconnector->fake_enable = false;
2387 
2388 			amdgpu_dm_update_connector_after_detect(aconnector);
2389 
2391 			drm_modeset_lock_all(dev);
2392 			dm_restore_drm_connector_state(dev, connector);
2393 			drm_modeset_unlock_all(dev);
2394 
2395 			drm_kms_helper_hotplug_event(dev);
2396 		}
2397 	}
2398 #ifdef CONFIG_DRM_AMD_DC_HDCP
2399 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2400 		if (adev->dm.hdcp_workqueue)
2401 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2402 	}
2403 #endif
2404 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2405 	    (dc_link->type == dc_connection_mst_branch))
2406 		dm_handle_hpd_rx_irq(aconnector);
2407 
2408 	if (dc_link->type != dc_connection_mst_branch) {
2409 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2410 		mutex_unlock(&aconnector->hpd_lock);
2411 	}
2412 }
2413 
2414 static void register_hpd_handlers(struct amdgpu_device *adev)
2415 {
2416 	struct drm_device *dev = adev->ddev;
2417 	struct drm_connector *connector;
2418 	struct amdgpu_dm_connector *aconnector;
2419 	const struct dc_link *dc_link;
2420 	struct dc_interrupt_params int_params = {0};
2421 
2422 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2423 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2424 
2425 	list_for_each_entry(connector,
2426 			&dev->mode_config.connector_list, head) {
2427 
2428 		aconnector = to_amdgpu_dm_connector(connector);
2429 		dc_link = aconnector->dc_link;
2430 
2431 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2432 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2433 			int_params.irq_source = dc_link->irq_source_hpd;
2434 
2435 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2436 					handle_hpd_irq,
2437 					(void *) aconnector);
2438 		}
2439 
2440 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2441 
2442 			/* Also register for DP short pulse (hpd_rx). */
2443 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2444 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2445 
2446 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2447 					handle_hpd_rx_irq,
2448 					(void *) aconnector);
2449 		}
2450 	}
2451 }
2452 
2453 /* Register IRQ sources and initialize IRQ callbacks */
2454 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2455 {
2456 	struct dc *dc = adev->dm.dc;
2457 	struct common_irq_params *c_irq_params;
2458 	struct dc_interrupt_params int_params = {0};
2459 	int r;
2460 	int i;
2461 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2462 
2463 	if (adev->asic_type >= CHIP_VEGA10)
2464 		client_id = SOC15_IH_CLIENTID_DCE;
2465 
2466 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2467 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2468 
2469 	/*
2470 	 * Actions of amdgpu_irq_add_id():
2471 	 * 1. Register a set() function with base driver.
2472 	 *    Base driver will call set() function to enable/disable an
2473 	 *    interrupt in DC hardware.
2474 	 * 2. Register amdgpu_dm_irq_handler().
2475 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2476 	 *    coming from DC hardware.
2477 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2478 	 *    for acknowledging and handling.
	 */
2479 
2480 	/* Use VBLANK interrupt */
2481 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2482 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2483 		if (r) {
2484 			DRM_ERROR("Failed to add crtc irq id!\n");
2485 			return r;
2486 		}
2487 
2488 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2489 		int_params.irq_source =
2490 			dc_interrupt_to_irq_source(dc, i, 0);
2491 
2492 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2493 
2494 		c_irq_params->adev = adev;
2495 		c_irq_params->irq_src = int_params.irq_source;
2496 
2497 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2498 				dm_crtc_high_irq, c_irq_params);
2499 	}
2500 
2501 	/* Use VUPDATE interrupt */
2502 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2503 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2504 		if (r) {
2505 			DRM_ERROR("Failed to add vupdate irq id!\n");
2506 			return r;
2507 		}
2508 
2509 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2510 		int_params.irq_source =
2511 			dc_interrupt_to_irq_source(dc, i, 0);
2512 
2513 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2514 
2515 		c_irq_params->adev = adev;
2516 		c_irq_params->irq_src = int_params.irq_source;
2517 
2518 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2519 				dm_vupdate_high_irq, c_irq_params);
2520 	}
2521 
2522 	/* Use GRPH_PFLIP interrupt */
2523 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2524 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2525 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2526 		if (r) {
2527 			DRM_ERROR("Failed to add page flip irq id!\n");
2528 			return r;
2529 		}
2530 
2531 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2532 		int_params.irq_source =
2533 			dc_interrupt_to_irq_source(dc, i, 0);
2534 
2535 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2536 
2537 		c_irq_params->adev = adev;
2538 		c_irq_params->irq_src = int_params.irq_source;
2539 
2540 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2541 				dm_pflip_high_irq, c_irq_params);
2542 
2544 
2545 	/* HPD */
2546 	r = amdgpu_irq_add_id(adev, client_id,
2547 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2548 	if (r) {
2549 		DRM_ERROR("Failed to add hpd irq id!\n");
2550 		return r;
2551 	}
2552 
2553 	register_hpd_handlers(adev);
2554 
2555 	return 0;
2556 }
2557 
2558 #if defined(CONFIG_DRM_AMD_DC_DCN)
2559 /* Register IRQ sources and initialize IRQ callbacks */
2560 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2561 {
2562 	struct dc *dc = adev->dm.dc;
2563 	struct common_irq_params *c_irq_params;
2564 	struct dc_interrupt_params int_params = {0};
2565 	int r;
2566 	int i;
2567 
2568 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2569 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2570 
2571 	/*
2572 	 * Actions of amdgpu_irq_add_id():
2573 	 * 1. Register a set() function with base driver.
2574 	 *    Base driver will call set() function to enable/disable an
2575 	 *    interrupt in DC hardware.
2576 	 * 2. Register amdgpu_dm_irq_handler().
2577 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2578 	 *    coming from DC hardware.
2579 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2580 	 *    for acknowledging and handling.
2581 	 */
2582 
2583 	/* Use VSTARTUP interrupt */
2584 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2585 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2586 			i++) {
2587 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2588 
2589 		if (r) {
2590 			DRM_ERROR("Failed to add crtc irq id!\n");
2591 			return r;
2592 		}
2593 
2594 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2595 		int_params.irq_source =
2596 			dc_interrupt_to_irq_source(dc, i, 0);
2597 
2598 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2599 
2600 		c_irq_params->adev = adev;
2601 		c_irq_params->irq_src = int_params.irq_source;
2602 
2603 		amdgpu_dm_irq_register_interrupt(
2604 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2605 	}
2606 
2607 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2608 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2609 	 * to trigger at end of each vblank, regardless of state of the lock,
2610 	 * matching DCE behaviour.
2611 	 */
2612 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2613 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2614 	     i++) {
2615 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2616 
2617 		if (r) {
2618 			DRM_ERROR("Failed to add vupdate irq id!\n");
2619 			return r;
2620 		}
2621 
2622 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2623 		int_params.irq_source =
2624 			dc_interrupt_to_irq_source(dc, i, 0);
2625 
2626 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2627 
2628 		c_irq_params->adev = adev;
2629 		c_irq_params->irq_src = int_params.irq_source;
2630 
2631 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2632 				dm_vupdate_high_irq, c_irq_params);
2633 	}
2634 
2635 	/* Use GRPH_PFLIP interrupt */
2636 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2637 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2638 			i++) {
2639 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2640 		if (r) {
2641 			DRM_ERROR("Failed to add page flip irq id!\n");
2642 			return r;
2643 		}
2644 
2645 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2646 		int_params.irq_source =
2647 			dc_interrupt_to_irq_source(dc, i, 0);
2648 
2649 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2650 
2651 		c_irq_params->adev = adev;
2652 		c_irq_params->irq_src = int_params.irq_source;
2653 
2654 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2655 				dm_pflip_high_irq, c_irq_params);
2657 	}
2658 
2659 	/* HPD */
2660 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2661 			&adev->hpd_irq);
2662 	if (r) {
2663 		DRM_ERROR("Failed to add hpd irq id!\n");
2664 		return r;
2665 	}
2666 
2667 	register_hpd_handlers(adev);
2668 
2669 	return 0;
2670 }
2671 #endif
2672 
2673 /*
2674  * Acquires the lock for the atomic state object and returns
2675  * the new atomic state.
2676  *
2677  * This should only be called during atomic check.
2678  */
2679 static int dm_atomic_get_state(struct drm_atomic_state *state,
2680 			       struct dm_atomic_state **dm_state)
2681 {
2682 	struct drm_device *dev = state->dev;
2683 	struct amdgpu_device *adev = dev->dev_private;
2684 	struct amdgpu_display_manager *dm = &adev->dm;
2685 	struct drm_private_state *priv_state;
2686 
2687 	if (*dm_state)
2688 		return 0;
2689 
2690 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2691 	if (IS_ERR(priv_state))
2692 		return PTR_ERR(priv_state);
2693 
2694 	*dm_state = to_dm_atomic_state(priv_state);
2695 
2696 	return 0;
2697 }
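
/*
 * Illustrative usage during atomic check (a sketch, not a real caller):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 */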
2698 
2699 struct dm_atomic_state *
2700 dm_atomic_get_new_state(struct drm_atomic_state *state)
2701 {
2702 	struct drm_device *dev = state->dev;
2703 	struct amdgpu_device *adev = dev->dev_private;
2704 	struct amdgpu_display_manager *dm = &adev->dm;
2705 	struct drm_private_obj *obj;
2706 	struct drm_private_state *new_obj_state;
2707 	int i;
2708 
2709 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2710 		if (obj->funcs == dm->atomic_obj.funcs)
2711 			return to_dm_atomic_state(new_obj_state);
2712 	}
2713 
2714 	return NULL;
2715 }
2716 
2717 struct dm_atomic_state *
2718 dm_atomic_get_old_state(struct drm_atomic_state *state)
2719 {
2720 	struct drm_device *dev = state->dev;
2721 	struct amdgpu_device *adev = dev->dev_private;
2722 	struct amdgpu_display_manager *dm = &adev->dm;
2723 	struct drm_private_obj *obj;
2724 	struct drm_private_state *old_obj_state;
2725 	int i;
2726 
2727 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2728 		if (obj->funcs == dm->atomic_obj.funcs)
2729 			return to_dm_atomic_state(old_obj_state);
2730 	}
2731 
2732 	return NULL;
2733 }
2734 
2735 static struct drm_private_state *
2736 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2737 {
2738 	struct dm_atomic_state *old_state, *new_state;
2739 
2740 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2741 	if (!new_state)
2742 		return NULL;
2743 
2744 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2745 
2746 	old_state = to_dm_atomic_state(obj->state);
2747 
2748 	if (old_state && old_state->context)
2749 		new_state->context = dc_copy_state(old_state->context);
2750 
2751 	if (!new_state->context) {
2752 		kfree(new_state);
2753 		return NULL;
2754 	}
2755 
2756 	return &new_state->base;
2757 }
2758 
2759 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2760 				    struct drm_private_state *state)
2761 {
2762 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2763 
2764 	if (dm_state && dm_state->context)
2765 		dc_release_state(dm_state->context);
2766 
2767 	kfree(dm_state);
2768 }
2769 
2770 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2771 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2772 	.atomic_destroy_state = dm_atomic_destroy_state,
2773 };
2774 
2775 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2776 {
2777 	struct dm_atomic_state *state;
2778 	int r;
2779 
2780 	adev->mode_info.mode_config_initialized = true;
2781 
2782 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2783 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2784 
2785 	adev->ddev->mode_config.max_width = 16384;
2786 	adev->ddev->mode_config.max_height = 16384;
2787 
2788 	adev->ddev->mode_config.preferred_depth = 24;
2789 	adev->ddev->mode_config.prefer_shadow = 1;
2790 	/* indicates support for immediate flip */
2791 	adev->ddev->mode_config.async_page_flip = true;
2792 
2793 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2794 
2795 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2796 	if (!state)
2797 		return -ENOMEM;
2798 
2799 	state->context = dc_create_state(adev->dm.dc);
2800 	if (!state->context) {
2801 		kfree(state);
2802 		return -ENOMEM;
2803 	}
2804 
2805 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2806 
2807 	drm_atomic_private_obj_init(adev->ddev,
2808 				    &adev->dm.atomic_obj,
2809 				    &state->base,
2810 				    &dm_atomic_state_funcs);
2811 
2812 	r = amdgpu_display_modeset_create_props(adev);
2813 	if (r)
2814 		return r;
2815 
2816 	r = amdgpu_dm_audio_init(adev);
2817 	if (r)
2818 		return r;
2819 
2820 	return 0;
2821 }
2822 
2823 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2824 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2825 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2826 
2827 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2828 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2829 
2830 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2831 {
2832 #if defined(CONFIG_ACPI)
2833 	struct amdgpu_dm_backlight_caps caps;
2834 
2835 	if (dm->backlight_caps.caps_valid)
2836 		return;
2837 
2838 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2839 	if (caps.caps_valid) {
2840 		dm->backlight_caps.caps_valid = true;
2841 		if (caps.aux_support)
2842 			return;
2843 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2844 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2845 	} else {
2846 		dm->backlight_caps.min_input_signal =
2847 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2848 		dm->backlight_caps.max_input_signal =
2849 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2850 	}
2851 #else
2852 	if (dm->backlight_caps.aux_support)
2853 		return;
2854 
2855 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2856 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2857 #endif
2858 }
2859 
2860 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2861 {
2862 	bool rc;
2863 
2864 	if (!link)
2865 		return 1;
2866 
2867 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2868 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2869 
2870 	return rc ? 0 : 1;
2871 }
2872 
2873 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2874 			      const uint32_t user_brightness)
2875 {
2876 	u32 min, max, conversion_pace;
2877 	u32 brightness = user_brightness;
2878 
2879 	if (!caps)
2880 		goto out;
2881 
2882 	if (!caps->aux_support) {
2883 		max = caps->max_input_signal;
2884 		min = caps->min_input_signal;
2885 		/*
2886 		 * The brightness input is in the range 0-255.
2887 		 * It needs to be rescaled to be between the
2888 		 * requested min and max input signal.
2889 		 * It also needs to be scaled up by 0x101 to
2890 		 * match the DC interface, which has a range of
2891 		 * 0 to 0xffff.
2892 		 */
2893 		conversion_pace = 0x101;
2894 		brightness =
2895 			user_brightness
2896 			* conversion_pace
2897 			* (max - min)
2898 			/ AMDGPU_MAX_BL_LEVEL
2899 			+ min * conversion_pace;
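		/*
		 * Sanity check: user_brightness == AMDGPU_MAX_BL_LEVEL (255)
		 * reduces the above to max * 0x101, i.e. 0xffff when max == 255.
		 */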
2900 	} else {
2901 		/* TODO
2902 		 * We are doing a linear interpolation here, which is OK but
2903 		 * does not provide the optimal result. We probably want
2904 		 * something close to the Perceptual Quantizer (PQ) curve.
2905 		 */
2906 		max = caps->aux_max_input_signal;
2907 		min = caps->aux_min_input_signal;
2908 
2909 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2910 			       + user_brightness * max;
2911 		/* Multiply the value by 1000 since we use millinits */
2912 		brightness *= 1000;
2913 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
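		/*
		 * Sanity check: user_brightness == AMDGPU_MAX_BL_LEVEL yields
		 * aux_max_input_signal * 1000 millinits.
		 */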
2914 	}
2915 
2916 out:
2917 	return brightness;
2918 }
2919 
2920 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2921 {
2922 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2923 	struct amdgpu_dm_backlight_caps caps;
2924 	struct dc_link *link = NULL;
2925 	u32 brightness;
2926 	bool rc;
2927 
2928 	amdgpu_dm_update_backlight_caps(dm);
2929 	caps = dm->backlight_caps;
2930 
2931 	link = (struct dc_link *)dm->backlight_link;
2932 
2933 	brightness = convert_brightness(&caps, bd->props.brightness);
2934 	/* Change brightness based on AUX property */
2935 	if (caps.aux_support)
2936 		return set_backlight_via_aux(link, brightness);
2937 
2938 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2939 
2940 	return rc ? 0 : 1;
2941 }
2942 
2943 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2944 {
2945 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2946 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2947 
2948 	if (ret == DC_ERROR_UNEXPECTED)
2949 		return bd->props.brightness;
2950 	return ret;
2951 }
2952 
2953 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2954 	.options = BL_CORE_SUSPENDRESUME,
2955 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2956 	.update_status	= amdgpu_dm_backlight_update_status,
2957 };
2958 
2959 static void
2960 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2961 {
2962 	char bl_name[16];
2963 	struct backlight_properties props = { 0 };
2964 
2965 	amdgpu_dm_update_backlight_caps(dm);
2966 
2967 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2968 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2969 	props.type = BACKLIGHT_RAW;
2970 
2971 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2972 			dm->adev->ddev->primary->index);
2973 
2974 	dm->backlight_dev = backlight_device_register(bl_name,
2975 			dm->adev->ddev->dev,
2976 			dm,
2977 			&amdgpu_dm_backlight_ops,
2978 			&props);
2979 
2980 	if (IS_ERR(dm->backlight_dev))
2981 		DRM_ERROR("DM: Backlight registration failed!\n");
2982 	else
2983 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2984 }
2985 
2986 #endif
2987 
2988 static int initialize_plane(struct amdgpu_display_manager *dm,
2989 			    struct amdgpu_mode_info *mode_info, int plane_id,
2990 			    enum drm_plane_type plane_type,
2991 			    const struct dc_plane_cap *plane_cap)
2992 {
2993 	struct drm_plane *plane;
2994 	unsigned long possible_crtcs;
2995 	int ret = 0;
2996 
2997 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2998 	if (!plane) {
2999 		DRM_ERROR("KMS: Failed to allocate plane\n");
3000 		return -ENOMEM;
3001 	}
3002 	plane->type = plane_type;
3003 
3004 	/*
3005 	 * HACK: IGT tests expect that the primary plane for a CRTC
3006 	 * can only have one possible CRTC. Only expose support for
3007 	 * any CRTC if they're not going to be used as a primary plane
3008 	 * for a CRTC - like overlay or underlay planes.
3009 	 */
3010 	possible_crtcs = 1 << plane_id;
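	/* For example, plane_id 0 maps to possible_crtcs = 0x1, i.e. CRTC 0 only. */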
3011 	if (plane_id >= dm->dc->caps.max_streams)
3012 		possible_crtcs = 0xff;
3013 
3014 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3015 
3016 	if (ret) {
3017 		DRM_ERROR("KMS: Failed to initialize plane\n");
3018 		kfree(plane);
3019 		return ret;
3020 	}
3021 
3022 	if (mode_info)
3023 		mode_info->planes[plane_id] = plane;
3024 
3025 	return ret;
3026 }
3027 
3029 static void register_backlight_device(struct amdgpu_display_manager *dm,
3030 				      struct dc_link *link)
3031 {
3032 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3033 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3034 
3035 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3036 	    link->type != dc_connection_none) {
3037 		/*
3038 		 * Even if registration fails, we should continue with
3039 		 * DM initialization, because not having backlight control
3040 		 * is better than a black screen.
3041 		 */
3042 		amdgpu_dm_register_backlight_device(dm);
3043 
3044 		if (dm->backlight_dev)
3045 			dm->backlight_link = link;
3046 	}
3047 #endif
3048 }
3049 
3051 /*
3052  * In this architecture, the association
3053  * connector -> encoder -> crtc
3054  * is not really required. The crtc and connector will hold the
3055  * display_index as an abstraction to use with the DAL component.
3056  *
3057  * Returns 0 on success
3058  */
3059 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3060 {
3061 	struct amdgpu_display_manager *dm = &adev->dm;
3062 	int32_t i;
3063 	struct amdgpu_dm_connector *aconnector = NULL;
3064 	struct amdgpu_encoder *aencoder = NULL;
3065 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3066 	uint32_t link_cnt;
3067 	int32_t primary_planes;
3068 	enum dc_connection_type new_connection_type = dc_connection_none;
3069 	const struct dc_plane_cap *plane;
3070 
3071 	link_cnt = dm->dc->caps.max_links;
3072 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3073 		DRM_ERROR("DM: Failed to initialize mode config\n");
3074 		return -EINVAL;
3075 	}
3076 
3077 	/* There is one primary plane per CRTC */
3078 	primary_planes = dm->dc->caps.max_streams;
3079 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3080 
3081 	/*
3082 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3083 	 * Order is reversed to match iteration order in atomic check.
3084 	 */
3085 	for (i = (primary_planes - 1); i >= 0; i--) {
3086 		plane = &dm->dc->caps.planes[i];
3087 
3088 		if (initialize_plane(dm, mode_info, i,
3089 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3090 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3091 			goto fail;
3092 		}
3093 	}
3094 
3095 	/*
3096 	 * Initialize overlay planes, index starting after primary planes.
3097 	 * These planes have a higher DRM index than the primary planes since
3098 	 * they should be considered as having a higher z-order.
3099 	 * Order is reversed to match iteration order in atomic check.
3100 	 *
3101 	 * Only support DCN for now, and only expose one so we don't encourage
3102 	 * userspace to use up all the pipes.
3103 	 */
3104 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3105 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3106 
3107 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3108 			continue;
3109 
3110 		if (!plane->blends_with_above || !plane->blends_with_below)
3111 			continue;
3112 
3113 		if (!plane->pixel_format_support.argb8888)
3114 			continue;
3115 
3116 		if (initialize_plane(dm, NULL, primary_planes + i,
3117 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3118 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3119 			goto fail;
3120 		}
3121 
3122 		/* Only create one overlay plane. */
3123 		break;
3124 	}
3125 
3126 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3127 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3128 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3129 			goto fail;
3130 		}
3131 
3132 	dm->display_indexes_num = dm->dc->caps.max_streams;
3133 
3134 	/* loops over all connectors on the board */
3135 	for (i = 0; i < link_cnt; i++) {
3136 		struct dc_link *link = NULL;
3137 
3138 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3139 			DRM_ERROR(
3140 				"KMS: Cannot support more than %d display indexes\n",
3141 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3142 			continue;
3143 		}
3144 
3145 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3146 		if (!aconnector)
3147 			goto fail;
3148 
3149 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3150 		if (!aencoder)
3151 			goto fail;
3152 
3153 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3154 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3155 			goto fail;
3156 		}
3157 
3158 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3159 			DRM_ERROR("KMS: Failed to initialize connector\n");
3160 			goto fail;
3161 		}
3162 
3163 		link = dc_get_link_at_index(dm->dc, i);
3164 
3165 		if (!dc_link_detect_sink(link, &new_connection_type))
3166 			DRM_ERROR("KMS: Failed to detect connector\n");
3167 
3168 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3169 			emulated_link_detect(link);
3170 			amdgpu_dm_update_connector_after_detect(aconnector);
3171 
3172 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3173 			amdgpu_dm_update_connector_after_detect(aconnector);
3174 			register_backlight_device(dm, link);
3175 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3176 				amdgpu_dm_set_psr_caps(link);
3177 		}
3180 	}
3181 
3182 	/* Software is initialized. Now we can register interrupt handlers. */
3183 	switch (adev->asic_type) {
3184 	case CHIP_BONAIRE:
3185 	case CHIP_HAWAII:
3186 	case CHIP_KAVERI:
3187 	case CHIP_KABINI:
3188 	case CHIP_MULLINS:
3189 	case CHIP_TONGA:
3190 	case CHIP_FIJI:
3191 	case CHIP_CARRIZO:
3192 	case CHIP_STONEY:
3193 	case CHIP_POLARIS11:
3194 	case CHIP_POLARIS10:
3195 	case CHIP_POLARIS12:
3196 	case CHIP_VEGAM:
3197 	case CHIP_VEGA10:
3198 	case CHIP_VEGA12:
3199 	case CHIP_VEGA20:
3200 		if (dce110_register_irq_handlers(dm->adev)) {
3201 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3202 			goto fail;
3203 		}
3204 		break;
3205 #if defined(CONFIG_DRM_AMD_DC_DCN)
3206 	case CHIP_RAVEN:
3207 	case CHIP_NAVI12:
3208 	case CHIP_NAVI10:
3209 	case CHIP_NAVI14:
3210 	case CHIP_RENOIR:
3211 		if (dcn10_register_irq_handlers(dm->adev)) {
3212 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3213 			goto fail;
3214 		}
3215 		break;
3216 #endif
3217 	default:
3218 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3219 		goto fail;
3220 	}
3221 
3222 	/* No userspace support. */
3223 	dm->dc->debug.disable_tri_buf = true;
3224 
3225 	return 0;
3226 fail:
3227 	kfree(aencoder);
3228 	kfree(aconnector);
3229 
3230 	return -EINVAL;
3231 }
3232 
3233 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3234 {
3235 	drm_mode_config_cleanup(dm->ddev);
3236 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3238 }
3239 
3240 /******************************************************************************
3241  * amdgpu_display_funcs functions
3242  *****************************************************************************/
3243 
3244 /*
3245  * dm_bandwidth_update - program display watermarks
3246  *
3247  * @adev: amdgpu_device pointer
3248  *
3249  * Calculate and program the display watermarks and line buffer allocation.
3250  */
3251 static void dm_bandwidth_update(struct amdgpu_device *adev)
3252 {
3253 	/* TODO: implement later */
3254 }
3255 
3256 static const struct amdgpu_display_funcs dm_display_funcs = {
3257 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3258 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3259 	.backlight_set_level = NULL, /* never called for DC */
3260 	.backlight_get_level = NULL, /* never called for DC */
3261 	.hpd_sense = NULL,/* called unconditionally */
3262 	.hpd_set_polarity = NULL, /* called unconditionally */
3263 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3264 	.page_flip_get_scanoutpos =
3265 		dm_crtc_get_scanoutpos,/* called unconditionally */
3266 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3267 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3268 };
3269 
3270 #if defined(CONFIG_DEBUG_KERNEL_DC)
3271 
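/*
 * Debug-only sysfs knob: writing a non-zero value resumes DM, writing zero
 * suspends it. Illustrative usage (the exact sysfs path depends on the
 * device):
 *
 *	echo 0 > /sys/class/drm/card0/device/s3_debug
 */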
3272 static ssize_t s3_debug_store(struct device *device,
3273 			      struct device_attribute *attr,
3274 			      const char *buf,
3275 			      size_t count)
3276 {
3277 	int ret;
3278 	int s3_state;
3279 	struct drm_device *drm_dev = dev_get_drvdata(device);
3280 	struct amdgpu_device *adev = drm_dev->dev_private;
3281 
3282 	ret = kstrtoint(buf, 0, &s3_state);
3283 
3284 	if (ret == 0) {
3285 		if (s3_state) {
3286 			dm_resume(adev);
3287 			drm_kms_helper_hotplug_event(adev->ddev);
3288 		} else {
3289 			dm_suspend(adev);
		}
3290 	}
3291 
3292 	return ret == 0 ? count : 0;
3293 }
3294 
3295 DEVICE_ATTR_WO(s3_debug);
3296 
3297 #endif
3298 
3299 static int dm_early_init(void *handle)
3300 {
3301 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3302 
3303 	switch (adev->asic_type) {
3304 	case CHIP_BONAIRE:
3305 	case CHIP_HAWAII:
3306 		adev->mode_info.num_crtc = 6;
3307 		adev->mode_info.num_hpd = 6;
3308 		adev->mode_info.num_dig = 6;
3309 		break;
3310 	case CHIP_KAVERI:
3311 		adev->mode_info.num_crtc = 4;
3312 		adev->mode_info.num_hpd = 6;
3313 		adev->mode_info.num_dig = 7;
3314 		break;
3315 	case CHIP_KABINI:
3316 	case CHIP_MULLINS:
3317 		adev->mode_info.num_crtc = 2;
3318 		adev->mode_info.num_hpd = 6;
3319 		adev->mode_info.num_dig = 6;
3320 		break;
3321 	case CHIP_FIJI:
3322 	case CHIP_TONGA:
3323 		adev->mode_info.num_crtc = 6;
3324 		adev->mode_info.num_hpd = 6;
3325 		adev->mode_info.num_dig = 7;
3326 		break;
3327 	case CHIP_CARRIZO:
3328 		adev->mode_info.num_crtc = 3;
3329 		adev->mode_info.num_hpd = 6;
3330 		adev->mode_info.num_dig = 9;
3331 		break;
3332 	case CHIP_STONEY:
3333 		adev->mode_info.num_crtc = 2;
3334 		adev->mode_info.num_hpd = 6;
3335 		adev->mode_info.num_dig = 9;
3336 		break;
3337 	case CHIP_POLARIS11:
3338 	case CHIP_POLARIS12:
3339 		adev->mode_info.num_crtc = 5;
3340 		adev->mode_info.num_hpd = 5;
3341 		adev->mode_info.num_dig = 5;
3342 		break;
3343 	case CHIP_POLARIS10:
3344 	case CHIP_VEGAM:
3345 		adev->mode_info.num_crtc = 6;
3346 		adev->mode_info.num_hpd = 6;
3347 		adev->mode_info.num_dig = 6;
3348 		break;
3349 	case CHIP_VEGA10:
3350 	case CHIP_VEGA12:
3351 	case CHIP_VEGA20:
3352 		adev->mode_info.num_crtc = 6;
3353 		adev->mode_info.num_hpd = 6;
3354 		adev->mode_info.num_dig = 6;
3355 		break;
3356 #if defined(CONFIG_DRM_AMD_DC_DCN)
3357 	case CHIP_RAVEN:
3358 		adev->mode_info.num_crtc = 4;
3359 		adev->mode_info.num_hpd = 4;
3360 		adev->mode_info.num_dig = 4;
3361 		break;
3362 #endif
3363 	case CHIP_NAVI10:
3364 	case CHIP_NAVI12:
3365 		adev->mode_info.num_crtc = 6;
3366 		adev->mode_info.num_hpd = 6;
3367 		adev->mode_info.num_dig = 6;
3368 		break;
3369 	case CHIP_NAVI14:
3370 		adev->mode_info.num_crtc = 5;
3371 		adev->mode_info.num_hpd = 5;
3372 		adev->mode_info.num_dig = 5;
3373 		break;
3374 	case CHIP_RENOIR:
3375 		adev->mode_info.num_crtc = 4;
3376 		adev->mode_info.num_hpd = 4;
3377 		adev->mode_info.num_dig = 4;
3378 		break;
3379 	default:
3380 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3381 		return -EINVAL;
3382 	}
3383 
3384 	amdgpu_dm_set_irq_funcs(adev);
3385 
3386 	if (adev->mode_info.funcs == NULL)
3387 		adev->mode_info.funcs = &dm_display_funcs;
3388 
3389 	/*
3390 	 * Note: Do NOT change adev->audio_endpt_rreg and
3391 	 * adev->audio_endpt_wreg because they are initialised in
3392 	 * amdgpu_device_init()
3393 	 */
3394 #if defined(CONFIG_DEBUG_KERNEL_DC)
3395 	device_create_file(
3396 		adev->ddev->dev,
3397 		&dev_attr_s3_debug);
3398 #endif
3399 
3400 	return 0;
3401 }
3402 
3403 static bool modeset_required(struct drm_crtc_state *crtc_state,
3404 			     struct dc_stream_state *new_stream,
3405 			     struct dc_stream_state *old_stream)
3406 {
3407 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3408 		return false;
3409 
3410 	if (!crtc_state->enable)
3411 		return false;
3412 
3413 	return crtc_state->active;
3414 }
3415 
3416 static bool modereset_required(struct drm_crtc_state *crtc_state)
3417 {
3418 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3419 		return false;
3420 
3421 	return !crtc_state->enable || !crtc_state->active;
3422 }
3423 
3424 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3425 {
3426 	drm_encoder_cleanup(encoder);
3427 	kfree(encoder);
3428 }
3429 
3430 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3431 	.destroy = amdgpu_dm_encoder_destroy,
3432 };
3433 
3435 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3436 				struct dc_scaling_info *scaling_info)
3437 {
3438 	int scale_w, scale_h;
3439 
3440 	memset(scaling_info, 0, sizeof(*scaling_info));
3441 
3442 	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
3443 	scaling_info->src_rect.x = state->src_x >> 16;
3444 	scaling_info->src_rect.y = state->src_y >> 16;
3445 
3446 	scaling_info->src_rect.width = state->src_w >> 16;
3447 	if (scaling_info->src_rect.width == 0)
3448 		return -EINVAL;
3449 
3450 	scaling_info->src_rect.height = state->src_h >> 16;
3451 	if (scaling_info->src_rect.height == 0)
3452 		return -EINVAL;
3453 
3454 	scaling_info->dst_rect.x = state->crtc_x;
3455 	scaling_info->dst_rect.y = state->crtc_y;
3456 
3457 	if (state->crtc_w == 0)
3458 		return -EINVAL;
3459 
3460 	scaling_info->dst_rect.width = state->crtc_w;
3461 
3462 	if (state->crtc_h == 0)
3463 		return -EINVAL;
3464 
3465 	scaling_info->dst_rect.height = state->crtc_h;
3466 
3467 	/* DRM doesn't specify clipping on destination output. */
3468 	scaling_info->clip_rect = scaling_info->dst_rect;
3469 
3470 	/* TODO: Validate scaling per-format with DC plane caps */
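	/*
	 * scale_w/scale_h are dst/src ratios in units of 0.001, so the
	 * 250..16000 bounds below allow 0.25x to 16x scaling.
	 */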
3471 	scale_w = scaling_info->dst_rect.width * 1000 /
3472 		  scaling_info->src_rect.width;
3473 
3474 	if (scale_w < 250 || scale_w > 16000)
3475 		return -EINVAL;
3476 
3477 	scale_h = scaling_info->dst_rect.height * 1000 /
3478 		  scaling_info->src_rect.height;
3479 
3480 	if (scale_h < 250 || scale_h > 16000)
3481 		return -EINVAL;
3482 
3483 	/*
3484 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3485 	 * assume reasonable defaults based on the format.
3486 	 */
3487 
3488 	return 0;
3489 }
3490 
3491 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3492 		       uint64_t *tiling_flags, bool *tmz_surface)
3493 {
3494 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3495 	int r = amdgpu_bo_reserve(rbo, false);
3496 
3497 	if (unlikely(r)) {
3498 		/* Don't show error message when returning -ERESTARTSYS */
3499 		if (r != -ERESTARTSYS)
3500 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3501 		return r;
3502 	}
3503 
3504 	if (tiling_flags)
3505 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3506 
3507 	if (tmz_surface)
3508 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3509 
3510 	amdgpu_bo_unreserve(rbo);
3511 
3512 	return r;
3513 }
3514 
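/* DCC_OFFSET_256B is stored in units of 256 bytes from the surface base. */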
3515 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3516 {
3517 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3518 
3519 	return offset ? (address + offset * 256) : 0;
3520 }
3521 
3522 static int
3523 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3524 			  const struct amdgpu_framebuffer *afb,
3525 			  const enum surface_pixel_format format,
3526 			  const enum dc_rotation_angle rotation,
3527 			  const struct plane_size *plane_size,
3528 			  const union dc_tiling_info *tiling_info,
3529 			  const uint64_t info,
3530 			  struct dc_plane_dcc_param *dcc,
3531 			  struct dc_plane_address *address,
3532 			  bool force_disable_dcc)
3533 {
3534 	struct dc *dc = adev->dm.dc;
3535 	struct dc_dcc_surface_param input;
3536 	struct dc_surface_dcc_cap output;
3537 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3538 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3539 	uint64_t dcc_address;
3540 
3541 	memset(&input, 0, sizeof(input));
3542 	memset(&output, 0, sizeof(output));
3543 
3544 	if (force_disable_dcc)
3545 		return 0;
3546 
3547 	if (!offset)
3548 		return 0;
3549 
3550 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3551 		return 0;
3552 
3553 	if (!dc->cap_funcs.get_dcc_compression_cap)
3554 		return -EINVAL;
3555 
3556 	input.format = format;
3557 	input.surface_size.width = plane_size->surface_size.width;
3558 	input.surface_size.height = plane_size->surface_size.height;
3559 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3560 
3561 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3562 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3563 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3564 		input.scan = SCAN_DIRECTION_VERTICAL;
3565 
3566 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3567 		return -EINVAL;
3568 
3569 	if (!output.capable)
3570 		return -EINVAL;
3571 
3572 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3573 		return -EINVAL;
3574 
3575 	dcc->enable = 1;
3576 	dcc->meta_pitch =
3577 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3578 	dcc->independent_64b_blks = i64b;
3579 
3580 	dcc_address = get_dcc_address(afb->address, info);
3581 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3582 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3583 
3584 	return 0;
3585 }
3586 
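/*
 * Fill the DC tiling, size and address information for a framebuffer.
 * Packed (RGB) surfaces use a single graphics address, while planar video
 * surfaces get separate luma/chroma addresses taken from the fb offsets.
 * GFX8 macro-tiling parameters come from the per-BO tiling flags; GFX9+
 * parameters are derived from the global gb_addr_config plus the swizzle
 * mode, with DCC attributes resolved afterwards.
 */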
3587 static int
3588 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3589 			     const struct amdgpu_framebuffer *afb,
3590 			     const enum surface_pixel_format format,
3591 			     const enum dc_rotation_angle rotation,
3592 			     const uint64_t tiling_flags,
3593 			     union dc_tiling_info *tiling_info,
3594 			     struct plane_size *plane_size,
3595 			     struct dc_plane_dcc_param *dcc,
3596 			     struct dc_plane_address *address,
3597 			     bool tmz_surface,
3598 			     bool force_disable_dcc)
3599 {
3600 	const struct drm_framebuffer *fb = &afb->base;
3601 	int ret;
3602 
3603 	memset(tiling_info, 0, sizeof(*tiling_info));
3604 	memset(plane_size, 0, sizeof(*plane_size));
3605 	memset(dcc, 0, sizeof(*dcc));
3606 	memset(address, 0, sizeof(*address));
3607 
3608 	address->tmz_surface = tmz_surface;
3609 
3610 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3611 		plane_size->surface_size.x = 0;
3612 		plane_size->surface_size.y = 0;
3613 		plane_size->surface_size.width = fb->width;
3614 		plane_size->surface_size.height = fb->height;
3615 		plane_size->surface_pitch =
3616 			fb->pitches[0] / fb->format->cpp[0];
3617 
3618 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3619 		address->grph.addr.low_part = lower_32_bits(afb->address);
3620 		address->grph.addr.high_part = upper_32_bits(afb->address);
3621 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3622 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3623 
3624 		plane_size->surface_size.x = 0;
3625 		plane_size->surface_size.y = 0;
3626 		plane_size->surface_size.width = fb->width;
3627 		plane_size->surface_size.height = fb->height;
3628 		plane_size->surface_pitch =
3629 			fb->pitches[0] / fb->format->cpp[0];
3630 
3631 		plane_size->chroma_size.x = 0;
3632 		plane_size->chroma_size.y = 0;
3633 		/* TODO: set these based on surface format */
3634 		plane_size->chroma_size.width = fb->width / 2;
3635 		plane_size->chroma_size.height = fb->height / 2;
3636 
3637 		plane_size->chroma_pitch =
3638 			fb->pitches[1] / fb->format->cpp[1];
3639 
3640 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3641 		address->video_progressive.luma_addr.low_part =
3642 			lower_32_bits(afb->address);
3643 		address->video_progressive.luma_addr.high_part =
3644 			upper_32_bits(afb->address);
3645 		address->video_progressive.chroma_addr.low_part =
3646 			lower_32_bits(chroma_addr);
3647 		address->video_progressive.chroma_addr.high_part =
3648 			upper_32_bits(chroma_addr);
3649 	}
3650 
3651 	/* Fill GFX8 params */
3652 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3653 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3654 
3655 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3656 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3657 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3658 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3659 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3660 
3661 		/* XXX fix me for VI */
3662 		tiling_info->gfx8.num_banks = num_banks;
3663 		tiling_info->gfx8.array_mode =
3664 				DC_ARRAY_2D_TILED_THIN1;
3665 		tiling_info->gfx8.tile_split = tile_split;
3666 		tiling_info->gfx8.bank_width = bankw;
3667 		tiling_info->gfx8.bank_height = bankh;
3668 		tiling_info->gfx8.tile_aspect = mtaspect;
3669 		tiling_info->gfx8.tile_mode =
3670 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3671 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3672 			== DC_ARRAY_1D_TILED_THIN1) {
3673 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3674 	}
3675 
3676 	tiling_info->gfx8.pipe_config =
3677 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3678 
3679 	if (adev->asic_type == CHIP_VEGA10 ||
3680 	    adev->asic_type == CHIP_VEGA12 ||
3681 	    adev->asic_type == CHIP_VEGA20 ||
3682 	    adev->asic_type == CHIP_NAVI10 ||
3683 	    adev->asic_type == CHIP_NAVI14 ||
3684 	    adev->asic_type == CHIP_NAVI12 ||
3685 	    adev->asic_type == CHIP_RENOIR ||
3686 	    adev->asic_type == CHIP_RAVEN) {
3687 		/* Fill GFX9 params */
3688 		tiling_info->gfx9.num_pipes =
3689 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3690 		tiling_info->gfx9.num_banks =
3691 			adev->gfx.config.gb_addr_config_fields.num_banks;
3692 		tiling_info->gfx9.pipe_interleave =
3693 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3694 		tiling_info->gfx9.num_shader_engines =
3695 			adev->gfx.config.gb_addr_config_fields.num_se;
3696 		tiling_info->gfx9.max_compressed_frags =
3697 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3698 		tiling_info->gfx9.num_rb_per_se =
3699 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3700 		tiling_info->gfx9.swizzle =
3701 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3702 		tiling_info->gfx9.shaderEnable = 1;
3703 
3704 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3705 						plane_size, tiling_info,
3706 						tiling_flags, dcc, address,
3707 						force_disable_dcc);
3708 		if (ret)
3709 			return ret;
3710 	}
3711 
3712 	return 0;
3713 }
3714 
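/*
 * Derive DC blending attributes from DRM plane state. Only overlay planes
 * participate: per-pixel alpha requires the pre-multiplied blend mode with
 * an alpha-capable format, and the 16-bit DRM plane alpha is scaled down
 * to the 8-bit global alpha DC expects (e.g. 0x8080 >> 8 = 0x80).
 */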
3715 static void
3716 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3717 			       bool *per_pixel_alpha, bool *global_alpha,
3718 			       int *global_alpha_value)
3719 {
3720 	*per_pixel_alpha = false;
3721 	*global_alpha = false;
3722 	*global_alpha_value = 0xff;
3723 
3724 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3725 		return;
3726 
3727 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3728 		static const uint32_t alpha_formats[] = {
3729 			DRM_FORMAT_ARGB8888,
3730 			DRM_FORMAT_RGBA8888,
3731 			DRM_FORMAT_ABGR8888,
3732 		};
3733 		uint32_t format = plane_state->fb->format->format;
3734 		unsigned int i;
3735 
3736 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3737 			if (format == alpha_formats[i]) {
3738 				*per_pixel_alpha = true;
3739 				break;
3740 			}
3741 		}
3742 	}
3743 
3744 	if (plane_state->alpha < 0xffff) {
3745 		*global_alpha = true;
3746 		*global_alpha_value = plane_state->alpha >> 8;
3747 	}
3748 }
3749 
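/*
 * Map the DRM color encoding/range plane properties onto a DC color space.
 * RGB surfaces always use sRGB; YCbCr surfaces pick BT.601/BT.709/BT.2020
 * in full or limited range, with unsupported combinations (e.g. limited
 * range BT.2020) rejected.
 */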
3750 static int
3751 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3752 			    const enum surface_pixel_format format,
3753 			    enum dc_color_space *color_space)
3754 {
3755 	bool full_range;
3756 
3757 	*color_space = COLOR_SPACE_SRGB;
3758 
3759 	/* DRM color properties only affect non-RGB formats. */
3760 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3761 		return 0;
3762 
3763 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3764 
3765 	switch (plane_state->color_encoding) {
3766 	case DRM_COLOR_YCBCR_BT601:
3767 		if (full_range)
3768 			*color_space = COLOR_SPACE_YCBCR601;
3769 		else
3770 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3771 		break;
3772 
3773 	case DRM_COLOR_YCBCR_BT709:
3774 		if (full_range)
3775 			*color_space = COLOR_SPACE_YCBCR709;
3776 		else
3777 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3778 		break;
3779 
3780 	case DRM_COLOR_YCBCR_BT2020:
3781 		if (full_range)
3782 			*color_space = COLOR_SPACE_2020_YCBCR;
3783 		else
3784 			return -EINVAL;
3785 		break;
3786 
3787 	default:
3788 		return -EINVAL;
3789 	}
3790 
3791 	return 0;
3792 }
3793 
3794 static int
3795 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3796 			    const struct drm_plane_state *plane_state,
3797 			    const uint64_t tiling_flags,
3798 			    struct dc_plane_info *plane_info,
3799 			    struct dc_plane_address *address,
3800 			    bool tmz_surface,
3801 			    bool force_disable_dcc)
3802 {
3803 	const struct drm_framebuffer *fb = plane_state->fb;
3804 	const struct amdgpu_framebuffer *afb =
3805 		to_amdgpu_framebuffer(plane_state->fb);
3806 	struct drm_format_name_buf format_name;
3807 	int ret;
3808 
3809 	memset(plane_info, 0, sizeof(*plane_info));
3810 
3811 	switch (fb->format->format) {
3812 	case DRM_FORMAT_C8:
3813 		plane_info->format =
3814 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3815 		break;
3816 	case DRM_FORMAT_RGB565:
3817 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3818 		break;
3819 	case DRM_FORMAT_XRGB8888:
3820 	case DRM_FORMAT_ARGB8888:
3821 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3822 		break;
3823 	case DRM_FORMAT_XRGB2101010:
3824 	case DRM_FORMAT_ARGB2101010:
3825 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3826 		break;
3827 	case DRM_FORMAT_XBGR2101010:
3828 	case DRM_FORMAT_ABGR2101010:
3829 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3830 		break;
3831 	case DRM_FORMAT_XBGR8888:
3832 	case DRM_FORMAT_ABGR8888:
3833 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3834 		break;
3835 	case DRM_FORMAT_NV21:
3836 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3837 		break;
3838 	case DRM_FORMAT_NV12:
3839 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3840 		break;
3841 	case DRM_FORMAT_P010:
3842 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3843 		break;
3844 	case DRM_FORMAT_XRGB16161616F:
3845 	case DRM_FORMAT_ARGB16161616F:
3846 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3847 		break;
3848 	case DRM_FORMAT_XBGR16161616F:
3849 	case DRM_FORMAT_ABGR16161616F:
3850 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3851 		break;
3852 	default:
3853 		DRM_ERROR(
3854 			"Unsupported screen format %s\n",
3855 			drm_get_format_name(fb->format->format, &format_name));
3856 		return -EINVAL;
3857 	}
3858 
3859 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3860 	case DRM_MODE_ROTATE_0:
3861 		plane_info->rotation = ROTATION_ANGLE_0;
3862 		break;
3863 	case DRM_MODE_ROTATE_90:
3864 		plane_info->rotation = ROTATION_ANGLE_90;
3865 		break;
3866 	case DRM_MODE_ROTATE_180:
3867 		plane_info->rotation = ROTATION_ANGLE_180;
3868 		break;
3869 	case DRM_MODE_ROTATE_270:
3870 		plane_info->rotation = ROTATION_ANGLE_270;
3871 		break;
3872 	default:
3873 		plane_info->rotation = ROTATION_ANGLE_0;
3874 		break;
3875 	}
3876 
3877 	plane_info->visible = true;
3878 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3879 
3880 	plane_info->layer_index = 0;
3881 
3882 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3883 					  &plane_info->color_space);
3884 	if (ret)
3885 		return ret;
3886 
3887 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3888 					   plane_info->rotation, tiling_flags,
3889 					   &plane_info->tiling_info,
3890 					   &plane_info->plane_size,
3891 					   &plane_info->dcc, address, tmz_surface,
3892 					   force_disable_dcc);
3893 	if (ret)
3894 		return ret;
3895 
3896 	fill_blending_from_plane_state(
3897 		plane_state, &plane_info->per_pixel_alpha,
3898 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3899 
3900 	return 0;
3901 }
3902 
3903 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3904 				    struct dc_plane_state *dc_plane_state,
3905 				    struct drm_plane_state *plane_state,
3906 				    struct drm_crtc_state *crtc_state)
3907 {
3908 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3909 	const struct amdgpu_framebuffer *amdgpu_fb =
3910 		to_amdgpu_framebuffer(plane_state->fb);
3911 	struct dc_scaling_info scaling_info;
3912 	struct dc_plane_info plane_info;
3913 	uint64_t tiling_flags;
3914 	int ret;
3915 	bool tmz_surface = false;
3916 	bool force_disable_dcc = false;
3917 
3918 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3919 	if (ret)
3920 		return ret;
3921 
3922 	dc_plane_state->src_rect = scaling_info.src_rect;
3923 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3924 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3925 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3926 
3927 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3928 	if (ret)
3929 		return ret;
3930 
3931 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3932 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3933 					  &plane_info,
3934 					  &dc_plane_state->address,
3935 					  tmz_surface,
3936 					  force_disable_dcc);
3937 	if (ret)
3938 		return ret;
3939 
3940 	dc_plane_state->format = plane_info.format;
3941 	dc_plane_state->color_space = plane_info.color_space;
3943 	dc_plane_state->plane_size = plane_info.plane_size;
3944 	dc_plane_state->rotation = plane_info.rotation;
3945 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3946 	dc_plane_state->stereo_format = plane_info.stereo_format;
3947 	dc_plane_state->tiling_info = plane_info.tiling_info;
3948 	dc_plane_state->visible = plane_info.visible;
3949 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3950 	dc_plane_state->global_alpha = plane_info.global_alpha;
3951 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3952 	dc_plane_state->dcc = plane_info.dcc;
3953 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
3954 
3955 	/*
3956 	 * Always set input transfer function, since plane state is refreshed
3957 	 * every time.
3958 	 */
3959 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3960 	if (ret)
3961 		return ret;
3962 
3963 	return 0;
3964 }
3965 
3966 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3967 					   const struct dm_connector_state *dm_state,
3968 					   struct dc_stream_state *stream)
3969 {
3970 	enum amdgpu_rmx_type rmx_type;
3971 
3972 	struct rect src = { 0 }; /* viewport in composition space */
3973 	struct rect dst = { 0 }; /* stream addressable area */
3974 
3975 	/* no mode. nothing to be done */
3976 	if (!mode)
3977 		return;
3978 
3979 	/* Full screen scaling by default */
3980 	src.width = mode->hdisplay;
3981 	src.height = mode->vdisplay;
3982 	dst.width = stream->timing.h_addressable;
3983 	dst.height = stream->timing.v_addressable;
3984 
3985 	if (dm_state) {
3986 		rmx_type = dm_state->scaling;
3987 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3988 			if (src.width * dst.height <
3989 					src.height * dst.width) {
3990 				/* height needs less upscaling/more downscaling */
3991 				dst.width = src.width *
3992 						dst.height / src.height;
3993 			} else {
3994 				/* width needs less upscaling/more downscaling */
3995 				dst.height = src.height *
3996 						dst.width / src.width;
3997 			}
3998 		} else if (rmx_type == RMX_CENTER) {
3999 			dst = src;
4000 		}
4001 
4002 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4003 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4004 
4005 		if (dm_state->underscan_enable) {
4006 			dst.x += dm_state->underscan_hborder / 2;
4007 			dst.y += dm_state->underscan_vborder / 2;
4008 			dst.width -= dm_state->underscan_hborder;
4009 			dst.height -= dm_state->underscan_vborder;
4010 		}
4011 	}
4012 
4013 	stream->src = src;
4014 	stream->dst = dst;
4015 
4016 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4017 			dst.x, dst.y, dst.width, dst.height);
4018 
4019 }
4020 
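/*
 * Work out the color depth for a stream. YCbCr 4:2:0 depth is capped by
 * the HDMI 2.0 HF-VSDB deep color bits, other encodings use the EDID bpc
 * (defaulting to 8 when unspecified), and the result is further capped by
 * the bpc requested via the connector's max_bpc property, rounded down to
 * an even value.
 */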
4021 static enum dc_color_depth
4022 convert_color_depth_from_display_info(const struct drm_connector *connector,
4023 				      bool is_y420, int requested_bpc)
4024 {
4025 	uint8_t bpc;
4026 
4027 	if (is_y420) {
4028 		bpc = 8;
4029 
4030 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4031 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4032 			bpc = 16;
4033 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4034 			bpc = 12;
4035 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4036 			bpc = 10;
4037 	} else {
4038 		bpc = (uint8_t)connector->display_info.bpc;
4039 		/* Assume 8 bpc by default if no bpc is specified. */
4040 		bpc = bpc ? bpc : 8;
4041 	}
4042 
4043 	if (requested_bpc > 0) {
4044 		/*
4045 		 * Cap display bpc based on the user requested value.
4046 		 *
4047 		 * The value for state->max_bpc may not be correctly updated
4048 		 * depending on when the connector gets added to the state
4049 		 * or if this was called outside of atomic check, so it
4050 		 * can't be used directly.
4051 		 */
4052 		bpc = min_t(u8, bpc, requested_bpc);
4053 
4054 		/* Round down to the nearest even number. */
4055 		bpc = bpc - (bpc & 1);
4056 	}
4057 
4058 	switch (bpc) {
4059 	case 0:
4060 		/*
4061 		 * Temporary workaround: DRM doesn't parse color depth for
4062 		 * EDID revisions before 1.4.
4063 		 * TODO: Fix EDID parsing.
4064 		 */
4065 		return COLOR_DEPTH_888;
4066 	case 6:
4067 		return COLOR_DEPTH_666;
4068 	case 8:
4069 		return COLOR_DEPTH_888;
4070 	case 10:
4071 		return COLOR_DEPTH_101010;
4072 	case 12:
4073 		return COLOR_DEPTH_121212;
4074 	case 14:
4075 		return COLOR_DEPTH_141414;
4076 	case 16:
4077 		return COLOR_DEPTH_161616;
4078 	default:
4079 		return COLOR_DEPTH_UNDEFINED;
4080 	}
4081 }
4082 
4083 static enum dc_aspect_ratio
4084 get_aspect_ratio(const struct drm_display_mode *mode_in)
4085 {
4086 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4087 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4088 }
4089 
4090 static enum dc_color_space
4091 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4092 {
4093 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4094 
4095 	switch (dc_crtc_timing->pixel_encoding) {
4096 	case PIXEL_ENCODING_YCBCR422:
4097 	case PIXEL_ENCODING_YCBCR444:
4098 	case PIXEL_ENCODING_YCBCR420:
4099 	{
4100 		/*
4101 		 * According to the HDMI spec, 27030 kHz is the separation
4102 		 * point between HDTV and SDTV, so use YCbCr709 and YCbCr601
4103 		 * respectively.
4104 		 */
4105 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4106 			if (dc_crtc_timing->flags.Y_ONLY)
4107 				color_space =
4108 					COLOR_SPACE_YCBCR709_LIMITED;
4109 			else
4110 				color_space = COLOR_SPACE_YCBCR709;
4111 		} else {
4112 			if (dc_crtc_timing->flags.Y_ONLY)
4113 				color_space =
4114 					COLOR_SPACE_YCBCR601_LIMITED;
4115 			else
4116 				color_space = COLOR_SPACE_YCBCR601;
4117 		}
4118 
4119 	}
4120 	break;
4121 	case PIXEL_ENCODING_RGB:
4122 		color_space = COLOR_SPACE_SRGB;
4123 		break;
4124 
4125 	default:
4126 		WARN_ON(1);
4127 		break;
4128 	}
4129 
4130 	return color_space;
4131 }
4132 
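/*
 * Walk the color depth down from the requested value until the resulting
 * TMDS clock fits within the display's max_tmds_clock. Deep color scales
 * the clock by bpp/24 (e.g. 10 bpc is a 30/24 multiplier), and YCbCr 4:2:0
 * halves it. As an illustrative example, a 594 MHz 4k60 mode on a 600 MHz
 * TMDS link fits at 8 bpc but not at 10 bpc (742.5 MHz), so the depth
 * falls back to COLOR_DEPTH_888.
 */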
4133 static bool adjust_colour_depth_from_display_info(
4134 	struct dc_crtc_timing *timing_out,
4135 	const struct drm_display_info *info)
4136 {
4137 	enum dc_color_depth depth = timing_out->display_color_depth;
4138 	int normalized_clk;
4139 	do {
4140 		normalized_clk = timing_out->pix_clk_100hz / 10;
4141 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4142 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4143 			normalized_clk /= 2;
4144 		/* Adjust the pixel clock per the HDMI spec based on colour depth. */
4145 		switch (depth) {
4146 		case COLOR_DEPTH_888:
4147 			break;
4148 		case COLOR_DEPTH_101010:
4149 			normalized_clk = (normalized_clk * 30) / 24;
4150 			break;
4151 		case COLOR_DEPTH_121212:
4152 			normalized_clk = (normalized_clk * 36) / 24;
4153 			break;
4154 		case COLOR_DEPTH_161616:
4155 			normalized_clk = (normalized_clk * 48) / 24;
4156 			break;
4157 		default:
4158 			/* The above depths are the only ones valid for HDMI. */
4159 			return false;
4160 		}
4161 		if (normalized_clk <= info->max_tmds_clock) {
4162 			timing_out->display_color_depth = depth;
4163 			return true;
4164 		}
4165 	} while (--depth > COLOR_DEPTH_666);
4166 	return false;
4167 }
4168 
4169 static void fill_stream_properties_from_drm_display_mode(
4170 	struct dc_stream_state *stream,
4171 	const struct drm_display_mode *mode_in,
4172 	const struct drm_connector *connector,
4173 	const struct drm_connector_state *connector_state,
4174 	const struct dc_stream_state *old_stream,
4175 	int requested_bpc)
4176 {
4177 	struct dc_crtc_timing *timing_out = &stream->timing;
4178 	const struct drm_display_info *info = &connector->display_info;
4179 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4180 	struct hdmi_vendor_infoframe hv_frame;
4181 	struct hdmi_avi_infoframe avi_frame;
4182 
4183 	memset(&hv_frame, 0, sizeof(hv_frame));
4184 	memset(&avi_frame, 0, sizeof(avi_frame));
4185 
4186 	timing_out->h_border_left = 0;
4187 	timing_out->h_border_right = 0;
4188 	timing_out->v_border_top = 0;
4189 	timing_out->v_border_bottom = 0;
4190 	/* TODO: un-hardcode */
4191 	if (drm_mode_is_420_only(info, mode_in)
4192 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4193 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4194 	else if (drm_mode_is_420_also(info, mode_in)
4195 			&& aconnector->force_yuv420_output)
4196 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4197 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4198 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4199 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4200 	else
4201 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4202 
4203 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4204 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4205 		connector,
4206 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4207 		requested_bpc);
4208 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4209 	timing_out->hdmi_vic = 0;
4210 
4211 	if (old_stream) {
4212 		timing_out->vic = old_stream->timing.vic;
4213 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4214 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4215 	} else {
4216 		timing_out->vic = drm_match_cea_mode(mode_in);
4217 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4218 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4219 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4220 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4221 	}
4222 
4223 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4224 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4225 		timing_out->vic = avi_frame.video_code;
4226 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4227 		timing_out->hdmi_vic = hv_frame.vic;
4228 	}
4229 
4230 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4231 	timing_out->h_total = mode_in->crtc_htotal;
4232 	timing_out->h_sync_width =
4233 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4234 	timing_out->h_front_porch =
4235 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4236 	timing_out->v_total = mode_in->crtc_vtotal;
4237 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4238 	timing_out->v_front_porch =
4239 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4240 	timing_out->v_sync_width =
4241 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4242 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4243 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4244 
4245 	stream->output_color_space = get_output_color_space(timing_out);
4246 
4247 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4248 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4249 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4250 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4251 		    drm_mode_is_420_also(info, mode_in) &&
4252 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4253 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4254 			adjust_colour_depth_from_display_info(timing_out, info);
4255 		}
4256 	}
4257 }
4258 
4259 static void fill_audio_info(struct audio_info *audio_info,
4260 			    const struct drm_connector *drm_connector,
4261 			    const struct dc_sink *dc_sink)
4262 {
4263 	int i = 0;
4264 	int cea_revision = 0;
4265 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4266 
4267 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4268 	audio_info->product_id = edid_caps->product_id;
4269 
4270 	cea_revision = drm_connector->display_info.cea_rev;
4271 
4272 	strscpy(audio_info->display_name,
4273 		edid_caps->display_name,
4274 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4275 
4276 	if (cea_revision >= 3) {
4277 		audio_info->mode_count = edid_caps->audio_mode_count;
4278 
4279 		for (i = 0; i < audio_info->mode_count; ++i) {
4280 			audio_info->modes[i].format_code =
4281 					(enum audio_format_code)
4282 					(edid_caps->audio_modes[i].format_code);
4283 			audio_info->modes[i].channel_count =
4284 					edid_caps->audio_modes[i].channel_count;
4285 			audio_info->modes[i].sample_rates.all =
4286 					edid_caps->audio_modes[i].sample_rate;
4287 			audio_info->modes[i].sample_size =
4288 					edid_caps->audio_modes[i].sample_size;
4289 		}
4290 	}
4291 
4292 	audio_info->flags.all = edid_caps->speaker_flags;
4293 
4294 	/* TODO: We only check progressive mode; check interlaced mode too. */
4295 	if (drm_connector->latency_present[0]) {
4296 		audio_info->video_latency = drm_connector->video_latency[0];
4297 		audio_info->audio_latency = drm_connector->audio_latency[0];
4298 	}
4299 
4300 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4301 
4302 }
4303 
4304 static void
4305 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4306 				      struct drm_display_mode *dst_mode)
4307 {
4308 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4309 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4310 	dst_mode->crtc_clock = src_mode->crtc_clock;
4311 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4312 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4313 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4314 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4315 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4316 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4317 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4318 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4319 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4320 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4321 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4322 }
4323 
4324 static void
4325 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4326 					const struct drm_display_mode *native_mode,
4327 					bool scale_enabled)
4328 {
4329 	if (scale_enabled) {
4330 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4331 	} else if (native_mode->clock == drm_mode->clock &&
4332 			native_mode->htotal == drm_mode->htotal &&
4333 			native_mode->vtotal == drm_mode->vtotal) {
4334 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4335 	} else {
4336 		/* no scaling nor amdgpu inserted, no need to patch */
4337 	}
4338 }
4339 
4340 static struct dc_sink *
4341 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4342 {
4343 	struct dc_sink_init_data sink_init_data = { 0 };
4344 	struct dc_sink *sink = NULL;

4345 	sink_init_data.link = aconnector->dc_link;
4346 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4347 
4348 	sink = dc_sink_create(&sink_init_data);
4349 	if (!sink) {
4350 		DRM_ERROR("Failed to create sink!\n");
4351 		return NULL;
4352 	}
4353 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4354 
4355 	return sink;
4356 }
4357 
4358 static void set_multisync_trigger_params(
4359 		struct dc_stream_state *stream)
4360 {
4361 	if (stream->triggered_crtc_reset.enabled) {
4362 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4363 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4364 	}
4365 }
4366 
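/*
 * Pick the multisync master: among the streams with a triggered CRTC reset
 * enabled, the one with the highest refresh rate becomes the event source
 * that every stream in the set synchronizes to.
 */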
4367 static void set_master_stream(struct dc_stream_state *stream_set[],
4368 			      int stream_count)
4369 {
4370 	int j, highest_rfr = 0, master_stream = 0;
4371 
4372 	for (j = 0;  j < stream_count; j++) {
4373 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4374 			int refresh_rate = 0;
4375 
4376 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
4377 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4378 			if (refresh_rate > highest_rfr) {
4379 				highest_rfr = refresh_rate;
4380 				master_stream = j;
4381 			}
4382 		}
4383 	}
4384 	for (j = 0;  j < stream_count; j++) {
4385 		if (stream_set[j])
4386 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4387 	}
4388 }
4389 
4390 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4391 {
4392 	int i = 0;
4393 
4394 	if (context->stream_count < 2)
4395 		return;
4396 	for (i = 0; i < context->stream_count ; i++) {
4397 		if (!context->streams[i])
4398 			continue;
4399 		/*
4400 		 * TODO: Add a function to read AMD VSDB bits and set the
4401 		 * crtc_sync_master.multi_sync_enabled flag. For now it's
4402 		 * left false.
4403 		 */
4404 		set_multisync_trigger_params(context->streams[i]);
4405 	}
4406 	set_master_stream(context->streams, context->stream_count);
4407 }
4408 
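/*
 * Build a dc_stream_state for a connector. A fake (virtual) sink is created
 * when no physical sink is attached, timings are taken from the requested
 * mode (optionally patched against the preferred mode when scaling), DSC is
 * enabled on DP links when the decoder advertises it and a config fits the
 * link bandwidth, and scaling, audio and PSR/VSC info packets are filled in
 * last.
 */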
4409 static struct dc_stream_state *
4410 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4411 		       const struct drm_display_mode *drm_mode,
4412 		       const struct dm_connector_state *dm_state,
4413 		       const struct dc_stream_state *old_stream,
4414 		       int requested_bpc)
4415 {
4416 	struct drm_display_mode *preferred_mode = NULL;
4417 	struct drm_connector *drm_connector;
4418 	const struct drm_connector_state *con_state =
4419 		dm_state ? &dm_state->base : NULL;
4420 	struct dc_stream_state *stream = NULL;
4421 	struct drm_display_mode mode = *drm_mode;
4422 	bool native_mode_found = false;
4423 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4424 	int mode_refresh;
4425 	int preferred_refresh = 0;
4426 #if defined(CONFIG_DRM_AMD_DC_DCN)
4427 	struct dsc_dec_dpcd_caps dsc_caps;
4428 #endif
4429 	uint32_t link_bandwidth_kbps;
4430 
4431 	struct dc_sink *sink = NULL;

4432 	if (aconnector == NULL) {
4433 		DRM_ERROR("aconnector is NULL!\n");
4434 		return stream;
4435 	}
4436 
4437 	drm_connector = &aconnector->base;
4438 
4439 	if (!aconnector->dc_sink) {
4440 		sink = create_fake_sink(aconnector);
4441 		if (!sink)
4442 			return stream;
4443 	} else {
4444 		sink = aconnector->dc_sink;
4445 		dc_sink_retain(sink);
4446 	}
4447 
4448 	stream = dc_create_stream_for_sink(sink);
4449 
4450 	if (stream == NULL) {
4451 		DRM_ERROR("Failed to create stream for sink!\n");
4452 		goto finish;
4453 	}
4454 
4455 	stream->dm_stream_context = aconnector;
4456 
4457 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4458 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4459 
4460 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4461 		/* Search for preferred mode */
4462 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4463 			native_mode_found = true;
4464 			break;
4465 		}
4466 	}
4467 	if (!native_mode_found)
4468 		preferred_mode = list_first_entry_or_null(
4469 				&aconnector->base.modes,
4470 				struct drm_display_mode,
4471 				head);
4472 
4473 	mode_refresh = drm_mode_vrefresh(&mode);
4474 
4475 	if (preferred_mode == NULL) {
4476 		/*
4477 		 * This may not be an error: the use case is a hotplug with no
4478 		 * usermode call to reset and set the mode. In that case we set
4479 		 * the mode ourselves to restore the previous mode, and the
4480 		 * mode list may not be filled in yet.
4481 		 */
4482 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4483 	} else {
4484 		decide_crtc_timing_for_drm_display_mode(
4485 				&mode, preferred_mode,
4486 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4487 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4488 	}
4489 
4490 	if (!dm_state)
4491 		drm_mode_set_crtcinfo(&mode, 0);
4492 
4493 	/*
4494 	 * If scaling is enabled and the refresh rate didn't change,
4495 	 * copy the VIC and polarities from the old timing.
4496 	 */
4497 	if (!scale || mode_refresh != preferred_refresh)
4498 		fill_stream_properties_from_drm_display_mode(stream,
4499 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4500 	else
4501 		fill_stream_properties_from_drm_display_mode(stream,
4502 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4503 
4504 	stream->timing.flags.DSC = 0;
4505 
4506 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4507 #if defined(CONFIG_DRM_AMD_DC_DCN)
4508 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4509 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4510 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4511 				      &dsc_caps);
4512 #endif
4513 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4514 							     dc_link_get_link_cap(aconnector->dc_link));
4515 
4516 #if defined(CONFIG_DRM_AMD_DC_DCN)
4517 		if (dsc_caps.is_dsc_supported)
4518 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4519 						  &dsc_caps,
4520 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4521 						  link_bandwidth_kbps,
4522 						  &stream->timing,
4523 						  &stream->timing.dsc_cfg))
4524 				stream->timing.flags.DSC = 1;
4525 #endif
4526 	}
4527 
4528 	update_stream_scaling_settings(&mode, dm_state, stream);
4529 
4530 	fill_audio_info(
4531 		&stream->audio_info,
4532 		drm_connector,
4533 		sink);
4534 
4535 	update_stream_signal(stream, sink);
4536 
4537 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4538 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4539 	if (stream->link->psr_settings.psr_feature_enabled) {
4540 		struct dc *core_dc = stream->link->ctx->dc;
4541 
4542 		if (dc_is_dmcu_initialized(core_dc)) {
4543 			/*
4544 			 * Decide whether the stream supports VSC SDP
4545 			 * colorimetry before building the VSC info packet.
4546 			 */
4547 			stream->use_vsc_sdp_for_colorimetry = false;
4548 			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4549 				stream->use_vsc_sdp_for_colorimetry =
4550 					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4551 			} else {
4552 				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4553 					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4554 					stream->use_vsc_sdp_for_colorimetry = true;
4555 				}
4556 			}
4557 			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4558 		}
4559 	}
4560 finish:
4561 	dc_sink_release(sink);
4562 
4563 	return stream;
4564 }
4565 
4566 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4567 {
4568 	drm_crtc_cleanup(crtc);
4569 	kfree(crtc);
4570 }
4571 
4572 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4573 				  struct drm_crtc_state *state)
4574 {
4575 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4576 
4577 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4578 	if (cur->stream)
4579 		dc_stream_release(cur->stream);
4580 
4581 
4582 	__drm_atomic_helper_crtc_destroy_state(state);
4583 
4584 
4585 	kfree(state);
4586 }
4587 
4588 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4589 {
4590 	struct dm_crtc_state *state;
4591 
4592 	if (crtc->state)
4593 		dm_crtc_destroy_state(crtc, crtc->state);
4594 
4595 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4596 	if (WARN_ON(!state))
4597 		return;
4598 
4599 	crtc->state = &state->base;
4600 	crtc->state->crtc = crtc;
4601 
4602 }
4603 
4604 static struct drm_crtc_state *
4605 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4606 {
4607 	struct dm_crtc_state *state, *cur;
4608 
4609 	if (WARN_ON(!crtc->state))
4610 		return NULL;
4611 
4612 	cur = to_dm_crtc_state(crtc->state);
4613 
4614 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4615 	if (!state)
4616 		return NULL;
4617 
4618 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4619 
4620 	if (cur->stream) {
4621 		state->stream = cur->stream;
4622 		dc_stream_retain(state->stream);
4623 	}
4624 
4625 	state->active_planes = cur->active_planes;
4626 	state->interrupts_enabled = cur->interrupts_enabled;
4627 	state->vrr_params = cur->vrr_params;
4628 	state->vrr_infopacket = cur->vrr_infopacket;
4629 	state->abm_level = cur->abm_level;
4630 	state->vrr_supported = cur->vrr_supported;
4631 	state->freesync_config = cur->freesync_config;
4632 	state->crc_src = cur->crc_src;
4633 	state->cm_has_degamma = cur->cm_has_degamma;
4634 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4635 
4636 	/* TODO: Duplicate dc_stream after the stream object is flattened */
4637 
4638 	return &state->base;
4639 }
4640 
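/*
 * Enable/disable the VUPDATE interrupt for a CRTC. As dm_set_vblank()
 * below shows, VUPDATE is only needed in addition to VBLANK when the
 * CRTC is in variable refresh rate mode.
 */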
4641 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4642 {
4643 	enum dc_irq_source irq_source;
4644 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4645 	struct amdgpu_device *adev = crtc->dev->dev_private;
4646 	int rc;
4647 
4648 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4649 
4650 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4651 
4652 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4653 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4654 	return rc;
4655 }
4656 
4657 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4658 {
4659 	enum dc_irq_source irq_source;
4660 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4661 	struct amdgpu_device *adev = crtc->dev->dev_private;
4662 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4663 	int rc = 0;
4664 
4665 	if (enable) {
4666 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4667 		if (amdgpu_dm_vrr_active(acrtc_state))
4668 			rc = dm_set_vupdate_irq(crtc, true);
4669 	} else {
4670 		/* vblank irq off -> vupdate irq off */
4671 		rc = dm_set_vupdate_irq(crtc, false);
4672 	}
4673 
4674 	if (rc)
4675 		return rc;
4676 
4677 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4678 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4679 }
4680 
4681 static int dm_enable_vblank(struct drm_crtc *crtc)
4682 {
4683 	return dm_set_vblank(crtc, true);
4684 }
4685 
4686 static void dm_disable_vblank(struct drm_crtc *crtc)
4687 {
4688 	dm_set_vblank(crtc, false);
4689 }
4690 
4691 /* Implement only the options currently available for the driver */
4692 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4693 	.reset = dm_crtc_reset_state,
4694 	.destroy = amdgpu_dm_crtc_destroy,
4695 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4696 	.set_config = drm_atomic_helper_set_config,
4697 	.page_flip = drm_atomic_helper_page_flip,
4698 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4699 	.atomic_destroy_state = dm_crtc_destroy_state,
4700 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4701 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4702 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4703 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4704 	.enable_vblank = dm_enable_vblank,
4705 	.disable_vblank = dm_disable_vblank,
4706 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4707 };
4708 
4709 static enum drm_connector_status
4710 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4711 {
4712 	bool connected;
4713 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4714 
4715 	/*
4716 	 * Notes:
4717 	 * 1. This interface is NOT called in context of HPD irq.
4718 	 * 2. This interface *is* called in the context of a user-mode ioctl,
4719 	 * which makes it a bad place for *any* MST-related activity.
4720 	 */
4721 
4722 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4723 	    !aconnector->fake_enable)
4724 		connected = (aconnector->dc_sink != NULL);
4725 	else
4726 		connected = (aconnector->base.force == DRM_FORCE_ON);
4727 
4728 	return (connected ? connector_status_connected :
4729 			connector_status_disconnected);
4730 }
4731 
4732 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4733 					    struct drm_connector_state *connector_state,
4734 					    struct drm_property *property,
4735 					    uint64_t val)
4736 {
4737 	struct drm_device *dev = connector->dev;
4738 	struct amdgpu_device *adev = dev->dev_private;
4739 	struct dm_connector_state *dm_old_state =
4740 		to_dm_connector_state(connector->state);
4741 	struct dm_connector_state *dm_new_state =
4742 		to_dm_connector_state(connector_state);
4743 
4744 	int ret = -EINVAL;
4745 
4746 	if (property == dev->mode_config.scaling_mode_property) {
4747 		enum amdgpu_rmx_type rmx_type;
4748 
4749 		switch (val) {
4750 		case DRM_MODE_SCALE_CENTER:
4751 			rmx_type = RMX_CENTER;
4752 			break;
4753 		case DRM_MODE_SCALE_ASPECT:
4754 			rmx_type = RMX_ASPECT;
4755 			break;
4756 		case DRM_MODE_SCALE_FULLSCREEN:
4757 			rmx_type = RMX_FULL;
4758 			break;
4759 		case DRM_MODE_SCALE_NONE:
4760 		default:
4761 			rmx_type = RMX_OFF;
4762 			break;
4763 		}
4764 
4765 		if (dm_old_state->scaling == rmx_type)
4766 			return 0;
4767 
4768 		dm_new_state->scaling = rmx_type;
4769 		ret = 0;
4770 	} else if (property == adev->mode_info.underscan_hborder_property) {
4771 		dm_new_state->underscan_hborder = val;
4772 		ret = 0;
4773 	} else if (property == adev->mode_info.underscan_vborder_property) {
4774 		dm_new_state->underscan_vborder = val;
4775 		ret = 0;
4776 	} else if (property == adev->mode_info.underscan_property) {
4777 		dm_new_state->underscan_enable = val;
4778 		ret = 0;
4779 	} else if (property == adev->mode_info.abm_level_property) {
4780 		dm_new_state->abm_level = val;
4781 		ret = 0;
4782 	}
4783 
4784 	return ret;
4785 }
4786 
4787 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4788 					    const struct drm_connector_state *state,
4789 					    struct drm_property *property,
4790 					    uint64_t *val)
4791 {
4792 	struct drm_device *dev = connector->dev;
4793 	struct amdgpu_device *adev = dev->dev_private;
4794 	struct dm_connector_state *dm_state =
4795 		to_dm_connector_state(state);
4796 	int ret = -EINVAL;
4797 
4798 	if (property == dev->mode_config.scaling_mode_property) {
4799 		switch (dm_state->scaling) {
4800 		case RMX_CENTER:
4801 			*val = DRM_MODE_SCALE_CENTER;
4802 			break;
4803 		case RMX_ASPECT:
4804 			*val = DRM_MODE_SCALE_ASPECT;
4805 			break;
4806 		case RMX_FULL:
4807 			*val = DRM_MODE_SCALE_FULLSCREEN;
4808 			break;
4809 		case RMX_OFF:
4810 		default:
4811 			*val = DRM_MODE_SCALE_NONE;
4812 			break;
4813 		}
4814 		ret = 0;
4815 	} else if (property == adev->mode_info.underscan_hborder_property) {
4816 		*val = dm_state->underscan_hborder;
4817 		ret = 0;
4818 	} else if (property == adev->mode_info.underscan_vborder_property) {
4819 		*val = dm_state->underscan_vborder;
4820 		ret = 0;
4821 	} else if (property == adev->mode_info.underscan_property) {
4822 		*val = dm_state->underscan_enable;
4823 		ret = 0;
4824 	} else if (property == adev->mode_info.abm_level_property) {
4825 		*val = dm_state->abm_level;
4826 		ret = 0;
4827 	}
4828 
4829 	return ret;
4830 }
4831 
4832 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4833 {
4834 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4835 
4836 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4837 }
4838 
4839 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4840 {
4841 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4842 	const struct dc_link *link = aconnector->dc_link;
4843 	struct amdgpu_device *adev = connector->dev->dev_private;
4844 	struct amdgpu_display_manager *dm = &adev->dm;
4845 
4846 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4847 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4848 
4849 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4850 	    link->type != dc_connection_none &&
4851 	    dm->backlight_dev) {
4852 		backlight_device_unregister(dm->backlight_dev);
4853 		dm->backlight_dev = NULL;
4854 	}
4855 #endif
4856 
4857 	if (aconnector->dc_em_sink)
4858 		dc_sink_release(aconnector->dc_em_sink);
4859 	aconnector->dc_em_sink = NULL;
4860 	if (aconnector->dc_sink)
4861 		dc_sink_release(aconnector->dc_sink);
4862 	aconnector->dc_sink = NULL;
4863 
4864 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4865 	drm_connector_unregister(connector);
4866 	drm_connector_cleanup(connector);
4867 	if (aconnector->i2c) {
4868 		i2c_del_adapter(&aconnector->i2c->base);
4869 		kfree(aconnector->i2c);
4870 	}
4871 	kfree(aconnector->dm_dp_aux.aux.name);
4872 
4873 	kfree(connector);
4874 }
4875 
4876 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4877 {
4878 	struct dm_connector_state *state =
4879 		to_dm_connector_state(connector->state);
4880 
4881 	if (connector->state)
4882 		__drm_atomic_helper_connector_destroy_state(connector->state);
4883 
4884 	kfree(state);
4885 
4886 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4887 
4888 	if (state) {
4889 		state->scaling = RMX_OFF;
4890 		state->underscan_enable = false;
4891 		state->underscan_hborder = 0;
4892 		state->underscan_vborder = 0;
4893 		state->base.max_requested_bpc = 8;
4894 		state->vcpi_slots = 0;
4895 		state->pbn = 0;
4896 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4897 			state->abm_level = amdgpu_dm_abm_level;
4898 
4899 		__drm_atomic_helper_connector_reset(connector, &state->base);
4900 	}
4901 }
4902 
4903 struct drm_connector_state *
4904 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4905 {
4906 	struct dm_connector_state *state =
4907 		to_dm_connector_state(connector->state);
4908 
4909 	struct dm_connector_state *new_state =
4910 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4911 
4912 	if (!new_state)
4913 		return NULL;
4914 
4915 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4916 
4917 	new_state->freesync_capable = state->freesync_capable;
4918 	new_state->abm_level = state->abm_level;
4919 	new_state->scaling = state->scaling;
4920 	new_state->underscan_enable = state->underscan_enable;
4921 	new_state->underscan_hborder = state->underscan_hborder;
4922 	new_state->underscan_vborder = state->underscan_vborder;
4923 	new_state->vcpi_slots = state->vcpi_slots;
4924 	new_state->pbn = state->pbn;
4925 	return &new_state->base;
4926 }
4927 
4928 static int
4929 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4930 {
4931 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4932 		to_amdgpu_dm_connector(connector);
4933 	int r;
4934 
4935 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4936 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4937 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4938 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4939 		if (r)
4940 			return r;
4941 	}
4942 
4943 #if defined(CONFIG_DEBUG_FS)
4944 	connector_debugfs_init(amdgpu_dm_connector);
4945 #endif
4946 
4947 	return 0;
4948 }
4949 
4950 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4951 	.reset = amdgpu_dm_connector_funcs_reset,
4952 	.detect = amdgpu_dm_connector_detect,
4953 	.fill_modes = drm_helper_probe_single_connector_modes,
4954 	.destroy = amdgpu_dm_connector_destroy,
4955 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4956 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4957 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4958 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4959 	.late_register = amdgpu_dm_connector_late_register,
4960 	.early_unregister = amdgpu_dm_connector_unregister
4961 };
4962 
4963 static int get_modes(struct drm_connector *connector)
4964 {
4965 	return amdgpu_dm_connector_get_modes(connector);
4966 }
4967 
4968 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4969 {
4970 	struct dc_sink_init_data init_params = {
4971 			.link = aconnector->dc_link,
4972 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4973 	};
4974 	struct edid *edid;
4975 
4976 	if (!aconnector->base.edid_blob_ptr) {
4977 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4978 				aconnector->base.name);
4979 
4980 		aconnector->base.force = DRM_FORCE_OFF;
4981 		aconnector->base.override_edid = false;
4982 		return;
4983 	}
4984 
4985 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4986 
4987 	aconnector->edid = edid;
4988 
4989 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4990 		aconnector->dc_link,
4991 		(uint8_t *)edid,
4992 		(edid->extensions + 1) * EDID_LENGTH,
4993 		&init_params);
4994 
4995 	if (aconnector->base.force == DRM_FORCE_ON) {
4996 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4997 		aconnector->dc_link->local_sink :
4998 		aconnector->dc_em_sink;
4999 		dc_sink_retain(aconnector->dc_sink);
5000 	}
5001 }
5002 
5003 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5004 {
5005 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5006 
5007 	/*
5008 	 * In case of a headless boot with force on for a DP managed connector,
5009 	 * these settings have to be != 0 to get an initial modeset.
5010 	 */
5011 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5012 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5013 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5014 	}
5015 
5016 
5017 	aconnector->base.override_edid = true;
5018 	create_eml_sink(aconnector);
5019 }
5020 
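/*
 * Create a stream and run it through DC validation, retrying with a lower
 * color depth on failure: starting from the connector's max_requested_bpc
 * (default 8), the bpc is reduced in steps of 2 down to a floor of 6, so a
 * mode that fails at 10 bpc is retried at 8 and then 6 before giving up.
 */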
5021 static struct dc_stream_state *
5022 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5023 				const struct drm_display_mode *drm_mode,
5024 				const struct dm_connector_state *dm_state,
5025 				const struct dc_stream_state *old_stream)
5026 {
5027 	struct drm_connector *connector = &aconnector->base;
5028 	struct amdgpu_device *adev = connector->dev->dev_private;
5029 	struct dc_stream_state *stream;
5030 	int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
5031 	enum dc_status dc_result = DC_OK;
5032 
5033 	do {
5034 		stream = create_stream_for_sink(aconnector, drm_mode,
5035 						dm_state, old_stream,
5036 						requested_bpc);
5037 		if (stream == NULL) {
5038 			DRM_ERROR("Failed to create stream for sink!\n");
5039 			break;
5040 		}
5041 
5042 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5043 
5044 		if (dc_result != DC_OK) {
5045 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
5046 				      drm_mode->hdisplay,
5047 				      drm_mode->vdisplay,
5048 				      drm_mode->clock,
5049 				      dc_result);
5050 
5051 			dc_stream_release(stream);
5052 			stream = NULL;
5053 			requested_bpc -= 2; /* lower bpc to retry validation */
5054 		}
5055 
5056 	} while (stream == NULL && requested_bpc >= 6);
5057 
5058 	return stream;
5059 }
5060 
5061 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5062 				   struct drm_display_mode *mode)
5063 {
5064 	int result = MODE_ERROR;
5065 	struct dc_sink *dc_sink;
5066 	/* TODO: Unhardcode stream count */
5067 	struct dc_stream_state *stream;
5068 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5069 
5070 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5071 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5072 		return result;
5073 
5074 	/*
5075 	 * Only run this the first time mode_valid is called, to initialize
5076 	 * EDID mgmt.
5077 	 */
5078 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5079 		!aconnector->dc_em_sink)
5080 		handle_edid_mgmt(aconnector);
5081 
5082 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5083 
5084 	if (dc_sink == NULL) {
5085 		DRM_ERROR("dc_sink is NULL!\n");
5086 		goto fail;
5087 	}
5088 
5089 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5090 	if (stream) {
5091 		dc_stream_release(stream);
5092 		result = MODE_OK;
5093 	}
5094 
5095 fail:
5096 	/* TODO: error handling */
5097 	return result;
5098 }
5099 
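/*
 * Pack the connector's HDR output metadata into a DC info packet. The HDMI
 * Dynamic Range and Mastering infoframe is a fixed 26-byte payload plus a
 * 4-byte header; for HDMI the type/version/length land in hb0..hb2, while
 * for DP/eDP the same payload is wrapped in an SDP header instead.
 */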
5100 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5101 				struct dc_info_packet *out)
5102 {
5103 	struct hdmi_drm_infoframe frame;
5104 	unsigned char buf[30]; /* 26 + 4 */
5105 	ssize_t len;
5106 	int ret, i;
5107 
5108 	memset(out, 0, sizeof(*out));
5109 
5110 	if (!state->hdr_output_metadata)
5111 		return 0;
5112 
5113 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5114 	if (ret)
5115 		return ret;
5116 
5117 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5118 	if (len < 0)
5119 		return (int)len;
5120 
5121 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5122 	if (len != 30)
5123 		return -EINVAL;
5124 
5125 	/* Prepare the infopacket for DC. */
5126 	switch (state->connector->connector_type) {
5127 	case DRM_MODE_CONNECTOR_HDMIA:
5128 		out->hb0 = 0x87; /* type */
5129 		out->hb1 = 0x01; /* version */
5130 		out->hb2 = 0x1A; /* length */
5131 		out->sb[0] = buf[3]; /* checksum */
5132 		i = 1;
5133 		break;
5134 
5135 	case DRM_MODE_CONNECTOR_DisplayPort:
5136 	case DRM_MODE_CONNECTOR_eDP:
5137 		out->hb0 = 0x00; /* sdp id, zero */
5138 		out->hb1 = 0x87; /* type */
5139 		out->hb2 = 0x1D; /* payload len - 1 */
5140 		out->hb3 = (0x13 << 2); /* sdp version */
5141 		out->sb[0] = 0x01; /* version */
5142 		out->sb[1] = 0x1A; /* length */
5143 		i = 2;
5144 		break;
5145 
5146 	default:
5147 		return -EINVAL;
5148 	}
5149 
5150 	memcpy(&out->sb[i], &buf[4], 26);
5151 	out->valid = true;
5152 
5153 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5154 		       sizeof(out->sb), false);
5155 
5156 	return 0;
5157 }
5158 
5159 static bool
5160 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5161 			  const struct drm_connector_state *new_state)
5162 {
5163 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5164 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5165 
5166 	if (old_blob != new_blob) {
5167 		if (old_blob && new_blob &&
5168 		    old_blob->length == new_blob->length)
5169 			return memcmp(old_blob->data, new_blob->data,
5170 				      old_blob->length);
5171 
5172 		return true;
5173 	}
5174 
5175 	return false;
5176 }
5177 
5178 static int
5179 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5180 				 struct drm_atomic_state *state)
5181 {
5182 	struct drm_connector_state *new_con_state =
5183 		drm_atomic_get_new_connector_state(state, conn);
5184 	struct drm_connector_state *old_con_state =
5185 		drm_atomic_get_old_connector_state(state, conn);
5186 	struct drm_crtc *crtc = new_con_state->crtc;
5187 	struct drm_crtc_state *new_crtc_state;
5188 	int ret;
5189 
5190 	if (!crtc)
5191 		return 0;
5192 
5193 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5194 		struct dc_info_packet hdr_infopacket;
5195 
5196 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5197 		if (ret)
5198 			return ret;
5199 
5200 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5201 		if (IS_ERR(new_crtc_state))
5202 			return PTR_ERR(new_crtc_state);
5203 
5204 		/*
5205 		 * DC considers the stream backends changed if the
5206 		 * static metadata changes. Forcing the modeset also
5207 		 * gives a simple way for userspace to switch from
5208 		 * 8bpc to 10bpc when setting the metadata to enter
5209 		 * or exit HDR.
5210 		 *
5211 		 * Changing the static metadata after it's been
5212 		 * set is permissible, however. So only force a
5213 		 * modeset if we're entering or exiting HDR.
5214 		 */
5215 		new_crtc_state->mode_changed =
5216 			!old_con_state->hdr_output_metadata ||
5217 			!new_con_state->hdr_output_metadata;
5218 	}
5219 
5220 	return 0;
5221 }
5222 
5223 static const struct drm_connector_helper_funcs
5224 amdgpu_dm_connector_helper_funcs = {
5225 	/*
5226 	 * If a second, larger display is hotplugged while in FB console mode,
5227 	 * its higher-resolution modes are filtered out by drm_mode_validate_size()
5228 	 * and are missing once the user starts lightdm. So renew the mode list
5229 	 * in the get_modes callback instead of just returning the mode count.
5230 	 */
5231 	.get_modes = get_modes,
5232 	.mode_valid = amdgpu_dm_connector_mode_valid,
5233 	.atomic_check = amdgpu_dm_connector_atomic_check,
5234 };
5235 
5236 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5237 {
5238 }
5239 
5240 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5241 {
5242 	struct drm_device *dev = new_crtc_state->crtc->dev;
5243 	struct drm_plane *plane;
5244 
5245 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5246 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5247 			return true;
5248 	}
5249 
5250 	return false;
5251 }
5252 
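/*
 * Count the hardware (non-cursor) planes that will be enabled on this
 * CRTC. Planes missing from the atomic state are untouched by this commit
 * and assumed to keep their previously validated, enabled configuration.
 */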
5253 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5254 {
5255 	struct drm_atomic_state *state = new_crtc_state->state;
5256 	struct drm_plane *plane;
5257 	int num_active = 0;
5258 
5259 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5260 		struct drm_plane_state *new_plane_state;
5261 
5262 		/* Cursor planes are "fake". */
5263 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5264 			continue;
5265 
5266 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5267 
5268 		if (!new_plane_state) {
5269 			/*
5270 			 * The plane is enabled on the CRTC and hasn't changed
5271 			 * state. This means that it previously passed
5272 			 * validation and is therefore enabled.
5273 			 */
5274 			num_active += 1;
5275 			continue;
5276 		}
5277 
5278 		/* We need a framebuffer to be considered enabled. */
5279 		num_active += (new_plane_state->fb != NULL);
5280 	}
5281 
5282 	return num_active;
5283 }
5284 
5285 /*
5286  * Sets whether interrupts should be enabled on a specific CRTC.
5287  * We require that the stream be enabled and that there exist active
5288  * DC planes on the stream.
5289  */
5290 static void
5291 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5292 			       struct drm_crtc_state *new_crtc_state)
5293 {
5294 	struct dm_crtc_state *dm_new_crtc_state =
5295 		to_dm_crtc_state(new_crtc_state);
5296 
5297 	dm_new_crtc_state->active_planes = 0;
5298 	dm_new_crtc_state->interrupts_enabled = false;
5299 
5300 	if (!dm_new_crtc_state->stream)
5301 		return;
5302 
5303 	dm_new_crtc_state->active_planes =
5304 		count_crtc_active_planes(new_crtc_state);
5305 
5306 	dm_new_crtc_state->interrupts_enabled =
5307 		dm_new_crtc_state->active_planes > 0;
5308 }
5309 
5310 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5311 				       struct drm_crtc_state *state)
5312 {
5313 	struct amdgpu_device *adev = crtc->dev->dev_private;
5314 	struct dc *dc = adev->dm.dc;
5315 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5316 	int ret = -EINVAL;
5317 
5318 	/*
5319 	 * Update interrupt state for the CRTC. This needs to happen whenever
5320 	 * the CRTC has changed or whenever any of its planes have changed.
5321 	 * Atomic check satisfies both of these requirements since the CRTC
5322 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
5323 	 */
5324 	dm_update_crtc_interrupt_state(crtc, state);
5325 
5326 	if (unlikely(!dm_crtc_state->stream &&
5327 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5328 		WARN_ON(1);
5329 		return ret;
5330 	}
5331 
5332 	/* In some use cases, like reset, no stream is attached */
5333 	if (!dm_crtc_state->stream)
5334 		return 0;
5335 
5336 	/*
5337 	 * We want at least one hardware plane enabled to use
5338 	 * the stream with a cursor enabled.
5339 	 */
5340 	if (state->enable && state->active &&
5341 	    does_crtc_have_active_cursor(state) &&
5342 	    dm_crtc_state->active_planes == 0)
5343 		return -EINVAL;
5344 
5345 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5346 		return 0;
5347 
5348 	return ret;
5349 }
5350 
5351 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5352 				      const struct drm_display_mode *mode,
5353 				      struct drm_display_mode *adjusted_mode)
5354 {
5355 	return true;
5356 }
5357 
5358 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5359 	.disable = dm_crtc_helper_disable,
5360 	.atomic_check = dm_crtc_helper_atomic_check,
5361 	.mode_fixup = dm_crtc_helper_mode_fixup,
5362 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5363 };
5364 
5365 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5366 {
5367 
5368 }
5369 
5370 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5371 {
5372 	switch (display_color_depth) {
5373 	case COLOR_DEPTH_666:
5374 		return 6;
5375 	case COLOR_DEPTH_888:
5376 		return 8;
5377 	case COLOR_DEPTH_101010:
5378 		return 10;
5379 	case COLOR_DEPTH_121212:
5380 		return 12;
5381 	case COLOR_DEPTH_141414:
5382 		return 14;
5383 	case COLOR_DEPTH_161616:
5384 		return 16;
5385 	default:
5386 		break;
5387 	}
5388 	return 0;
5389 }
5390 
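/*
 * For MST connectors, recompute the payload bandwidth number (PBN) and
 * reserve VCPI time slots in the atomic state. The uncompressed stream
 * bandwidth follows from the adjusted pixel clock and the negotiated
 * colour depth: bpp = bpc * 3 for the three colour components, so e.g. an
 * 8 bpc stream needs 24 bpp. drm_dp_atomic_find_vcpi_slots() then fails
 * the check if the MST link cannot carry that PBN.
 */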
5391 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5392 					  struct drm_crtc_state *crtc_state,
5393 					  struct drm_connector_state *conn_state)
5394 {
5395 	struct drm_atomic_state *state = crtc_state->state;
5396 	struct drm_connector *connector = conn_state->connector;
5397 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5398 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5399 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5400 	struct drm_dp_mst_topology_mgr *mst_mgr;
5401 	struct drm_dp_mst_port *mst_port;
5402 	enum dc_color_depth color_depth;
5403 	int clock, bpp = 0;
5404 	bool is_y420 = false;
5405 
5406 	if (!aconnector->port || !aconnector->dc_sink)
5407 		return 0;
5408 
5409 	mst_port = aconnector->port;
5410 	mst_mgr = &aconnector->mst_port->mst_mgr;
5411 
5412 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5413 		return 0;
5414 
5415 	if (!state->duplicated) {
5416 		int max_bpc = conn_state->max_requested_bpc;
5417 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5418 				aconnector->force_yuv420_output;
5419 		color_depth = convert_color_depth_from_display_info(connector,
5420 								    is_y420,
5421 								    max_bpc);
5422 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5423 		clock = adjusted_mode->clock;
5424 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5425 	}
5426 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5427 									   mst_mgr,
5428 									   mst_port,
5429 									   dm_new_connector_state->pbn,
5430 									   0);
5431 	if (dm_new_connector_state->vcpi_slots < 0) {
5432 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5433 		return dm_new_connector_state->vcpi_slots;
5434 	}
5435 	return 0;
5436 }
5437 
5438 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5439 	.disable = dm_encoder_helper_disable,
5440 	.atomic_check = dm_encoder_helper_atomic_check
5441 };
5442 
5443 #if defined(CONFIG_DRM_AMD_DC_DCN)
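/*
 * Reconcile the MST VCPI allocations with the DSC decision DC made per
 * stream: streams without DSC fall back to the PBN computed from the
 * uncompressed timing above, while DSC streams recompute the PBN from the
 * compressed bits-per-pixel before enabling DSC on the port.
 */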
5444 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5445 					    struct dc_state *dc_state)
5446 {
5447 	struct dc_stream_state *stream = NULL;
5448 	struct drm_connector *connector;
5449 	struct drm_connector_state *new_con_state, *old_con_state;
5450 	struct amdgpu_dm_connector *aconnector;
5451 	struct dm_connector_state *dm_conn_state;
5452 	int i, j, clock, bpp;
5453 	int vcpi, pbn_div, pbn = 0;
5454 
5455 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5456 
5457 		aconnector = to_amdgpu_dm_connector(connector);
5458 
5459 		if (!aconnector->port)
5460 			continue;
5461 
5462 		if (!new_con_state || !new_con_state->crtc)
5463 			continue;
5464 
5465 		dm_conn_state = to_dm_connector_state(new_con_state);
5466 
5467 		for (j = 0; j < dc_state->stream_count; j++) {
5468 			stream = dc_state->streams[j];
5469 			if (!stream)
5470 				continue;
5471 
5472 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5473 				break;
5474 
5475 			stream = NULL;
5476 		}
5477 
5478 		if (!stream)
5479 			continue;
5480 
5481 		if (stream->timing.flags.DSC != 1) {
5482 			drm_dp_mst_atomic_enable_dsc(state,
5483 						     aconnector->port,
5484 						     dm_conn_state->pbn,
5485 						     0,
5486 						     false);
5487 			continue;
5488 		}
5489 
5490 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5491 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5492 		clock = stream->timing.pix_clk_100hz / 10;
5493 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5494 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5495 						    aconnector->port,
5496 						    pbn, pbn_div,
5497 						    true);
5498 		if (vcpi < 0)
5499 			return vcpi;
5500 
5501 		dm_conn_state->pbn = pbn;
5502 		dm_conn_state->vcpi_slots = vcpi;
5503 	}
5504 	return 0;
5505 }
5506 #endif
5507 
5508 static void dm_drm_plane_reset(struct drm_plane *plane)
5509 {
5510 	struct dm_plane_state *amdgpu_state = NULL;
5511 
5512 	if (plane->state)
5513 		plane->funcs->atomic_destroy_state(plane, plane->state);
5514 
5515 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5516 	WARN_ON(amdgpu_state == NULL);
5517 
5518 	if (amdgpu_state)
5519 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5520 }
5521 
5522 static struct drm_plane_state *
5523 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5524 {
5525 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5526 
5527 	old_dm_plane_state = to_dm_plane_state(plane->state);
5528 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5529 	if (!dm_plane_state)
5530 		return NULL;
5531 
5532 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5533 
5534 	if (old_dm_plane_state->dc_state) {
5535 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5536 		dc_plane_state_retain(dm_plane_state->dc_state);
5537 	}
5538 
5539 	return &dm_plane_state->base;
5540 }
5541 
5542 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5543 				struct drm_plane_state *state)
5544 {
5545 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5546 
5547 	if (dm_plane_state->dc_state)
5548 		dc_plane_state_release(dm_plane_state->dc_state);
5549 
5550 	drm_atomic_helper_plane_destroy_state(plane, state);
5551 }
5552 
5553 static const struct drm_plane_funcs dm_plane_funcs = {
5554 	.update_plane	= drm_atomic_helper_update_plane,
5555 	.disable_plane	= drm_atomic_helper_disable_plane,
5556 	.destroy	= drm_primary_helper_destroy,
5557 	.reset = dm_drm_plane_reset,
5558 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5559 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5560 };
5561 
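/*
 * Prepare a framebuffer for scanout: reserve the backing BO, pin it into a
 * scanout-capable domain (VRAM for cursors, any display-supported domain
 * otherwise), bind it into GART for a fixed GPU address, and snapshot the
 * tiling and TMZ attributes DC needs to program the surface.
 */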
5562 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5563 				      struct drm_plane_state *new_state)
5564 {
5565 	struct amdgpu_framebuffer *afb;
5566 	struct drm_gem_object *obj;
5567 	struct amdgpu_device *adev;
5568 	struct amdgpu_bo *rbo;
5569 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5570 	struct list_head list;
5571 	struct ttm_validate_buffer tv;
5572 	struct ww_acquire_ctx ticket;
5573 	uint64_t tiling_flags;
5574 	uint32_t domain;
5575 	int r;
5576 	bool tmz_surface = false;
5577 	bool force_disable_dcc = false;
5578 
5579 	dm_plane_state_old = to_dm_plane_state(plane->state);
5580 	dm_plane_state_new = to_dm_plane_state(new_state);
5581 
5582 	if (!new_state->fb) {
5583 		DRM_DEBUG_DRIVER("No FB bound\n");
5584 		return 0;
5585 	}
5586 
5587 	afb = to_amdgpu_framebuffer(new_state->fb);
5588 	obj = new_state->fb->obj[0];
5589 	rbo = gem_to_amdgpu_bo(obj);
5590 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5591 	INIT_LIST_HEAD(&list);
5592 
5593 	tv.bo = &rbo->tbo;
5594 	tv.num_shared = 1;
5595 	list_add(&tv.head, &list);
5596 
5597 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5598 	if (r) {
5599 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5600 		return r;
5601 	}
5602 
5603 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5604 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5605 	else
5606 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5607 
5608 	r = amdgpu_bo_pin(rbo, domain);
5609 	if (unlikely(r != 0)) {
5610 		if (r != -ERESTARTSYS)
5611 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5612 		ttm_eu_backoff_reservation(&ticket, &list);
5613 		return r;
5614 	}
5615 
5616 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5617 	if (unlikely(r != 0)) {
5618 		amdgpu_bo_unpin(rbo);
5619 		ttm_eu_backoff_reservation(&ticket, &list);
5620 		DRM_ERROR("%p bind failed\n", rbo);
5621 		return r;
5622 	}
5623 
5624 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5625 
5626 	tmz_surface = amdgpu_bo_encrypted(rbo);
5627 
5628 	ttm_eu_backoff_reservation(&ticket, &list);
5629 
5630 	afb->address = amdgpu_bo_gpu_offset(rbo);
5631 
5632 	amdgpu_bo_ref(rbo);
5633 
5634 	if (dm_plane_state_new->dc_state &&
5635 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5636 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5637 
5638 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5639 		fill_plane_buffer_attributes(
5640 			adev, afb, plane_state->format, plane_state->rotation,
5641 			tiling_flags, &plane_state->tiling_info,
5642 			&plane_state->plane_size, &plane_state->dcc,
5643 			&plane_state->address, tmz_surface,
5644 			force_disable_dcc);
5645 	}
5646 
5647 	return 0;
5648 }
5649 
5650 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5651 				       struct drm_plane_state *old_state)
5652 {
5653 	struct amdgpu_bo *rbo;
5654 	int r;
5655 
5656 	if (!old_state->fb)
5657 		return;
5658 
5659 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5660 	r = amdgpu_bo_reserve(rbo, false);
5661 	if (unlikely(r)) {
5662 		DRM_ERROR("failed to reserve rbo before unpin\n");
5663 		return;
5664 	}
5665 
5666 	amdgpu_bo_unpin(rbo);
5667 	amdgpu_bo_unreserve(rbo);
5668 	amdgpu_bo_unref(&rbo);
5669 }
5670 
5671 static int dm_plane_atomic_check(struct drm_plane *plane,
5672 				 struct drm_plane_state *state)
5673 {
5674 	struct amdgpu_device *adev = plane->dev->dev_private;
5675 	struct dc *dc = adev->dm.dc;
5676 	struct dm_plane_state *dm_plane_state;
5677 	struct dc_scaling_info scaling_info;
5678 	int ret;
5679 
5680 	dm_plane_state = to_dm_plane_state(state);
5681 
5682 	if (!dm_plane_state->dc_state)
5683 		return 0;
5684 
5685 	ret = fill_dc_scaling_info(state, &scaling_info);
5686 	if (ret)
5687 		return ret;
5688 
5689 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5690 		return 0;
5691 
5692 	return -EINVAL;
5693 }
5694 
5695 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5696 				       struct drm_plane_state *new_plane_state)
5697 {
5698 	/* Only support async updates on cursor planes. */
5699 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5700 		return -EINVAL;
5701 
5702 	return 0;
5703 }
5704 
5705 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5706 					 struct drm_plane_state *new_state)
5707 {
5708 	struct drm_plane_state *old_state =
5709 		drm_atomic_get_old_plane_state(new_state->state, plane);
5710 
5711 	swap(plane->state->fb, new_state->fb);
5712 
5713 	plane->state->src_x = new_state->src_x;
5714 	plane->state->src_y = new_state->src_y;
5715 	plane->state->src_w = new_state->src_w;
5716 	plane->state->src_h = new_state->src_h;
5717 	plane->state->crtc_x = new_state->crtc_x;
5718 	plane->state->crtc_y = new_state->crtc_y;
5719 	plane->state->crtc_w = new_state->crtc_w;
5720 	plane->state->crtc_h = new_state->crtc_h;
5721 
5722 	handle_cursor_update(plane, old_state);
5723 }
5724 
5725 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5726 	.prepare_fb = dm_plane_helper_prepare_fb,
5727 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5728 	.atomic_check = dm_plane_atomic_check,
5729 	.atomic_async_check = dm_plane_atomic_async_check,
5730 	.atomic_async_update = dm_plane_atomic_async_update
5731 };
5732 
5733 /*
5734  * TODO: These are currently initialized to RGB formats only.
5735  * For future use cases we should either initialize them dynamically based
5736  * on plane capabilities, or initialize this array to all formats, so the
5737  * internal drm check will succeed and DC can implement the proper check.
5738  */
5739 static const uint32_t rgb_formats[] = {
5740 	DRM_FORMAT_XRGB8888,
5741 	DRM_FORMAT_ARGB8888,
5742 	DRM_FORMAT_RGBA8888,
5743 	DRM_FORMAT_XRGB2101010,
5744 	DRM_FORMAT_XBGR2101010,
5745 	DRM_FORMAT_ARGB2101010,
5746 	DRM_FORMAT_ABGR2101010,
5747 	DRM_FORMAT_XBGR8888,
5748 	DRM_FORMAT_ABGR8888,
5749 	DRM_FORMAT_RGB565,
5750 };
5751 
5752 static const uint32_t overlay_formats[] = {
5753 	DRM_FORMAT_XRGB8888,
5754 	DRM_FORMAT_ARGB8888,
5755 	DRM_FORMAT_RGBA8888,
5756 	DRM_FORMAT_XBGR8888,
5757 	DRM_FORMAT_ABGR8888,
5758 	DRM_FORMAT_RGB565
5759 };
5760 
5761 static const u32 cursor_formats[] = {
5762 	DRM_FORMAT_ARGB8888
5763 };
5764 
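/*
 * Fill 'formats' with the DRM fourccs supported by this plane type, up to
 * max_formats entries. The static RGB table is extended at runtime with
 * NV12/P010 and the FP16 variants when the DC plane caps report support.
 */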
5765 static int get_plane_formats(const struct drm_plane *plane,
5766 			     const struct dc_plane_cap *plane_cap,
5767 			     uint32_t *formats, int max_formats)
5768 {
5769 	int i, num_formats = 0;
5770 
5771 	/*
5772 	 * TODO: Query support for each group of formats directly from
5773 	 * DC plane caps. This will require adding more formats to the
5774 	 * caps list.
5775 	 */
5776 
5777 	switch (plane->type) {
5778 	case DRM_PLANE_TYPE_PRIMARY:
5779 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5780 			if (num_formats >= max_formats)
5781 				break;
5782 
5783 			formats[num_formats++] = rgb_formats[i];
5784 		}
5785 
5786 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5787 			formats[num_formats++] = DRM_FORMAT_NV12;
5788 		if (plane_cap && plane_cap->pixel_format_support.p010)
5789 			formats[num_formats++] = DRM_FORMAT_P010;
5790 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5791 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5792 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5793 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5794 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5795 		}
5796 		break;
5797 
5798 	case DRM_PLANE_TYPE_OVERLAY:
5799 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5800 			if (num_formats >= max_formats)
5801 				break;
5802 
5803 			formats[num_formats++] = overlay_formats[i];
5804 		}
5805 		break;
5806 
5807 	case DRM_PLANE_TYPE_CURSOR:
5808 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5809 			if (num_formats >= max_formats)
5810 				break;
5811 
5812 			formats[num_formats++] = cursor_formats[i];
5813 		}
5814 		break;
5815 	}
5816 
5817 	return num_formats;
5818 }
5819 
5820 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5821 				struct drm_plane *plane,
5822 				unsigned long possible_crtcs,
5823 				const struct dc_plane_cap *plane_cap)
5824 {
5825 	uint32_t formats[32];
5826 	int num_formats;
5827 	int res = -EPERM;
5828 
5829 	num_formats = get_plane_formats(plane, plane_cap, formats,
5830 					ARRAY_SIZE(formats));
5831 
5832 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5833 				       &dm_plane_funcs, formats, num_formats,
5834 				       NULL, plane->type, NULL);
5835 	if (res)
5836 		return res;
5837 
5838 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5839 	    plane_cap && plane_cap->per_pixel_alpha) {
5840 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5841 					  BIT(DRM_MODE_BLEND_PREMULTI);
5842 
5843 		drm_plane_create_alpha_property(plane);
5844 		drm_plane_create_blend_mode_property(plane, blend_caps);
5845 	}
5846 
5847 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5848 	    plane_cap &&
5849 	    (plane_cap->pixel_format_support.nv12 ||
5850 	     plane_cap->pixel_format_support.p010)) {
5851 		/* This only affects YUV formats. */
5852 		drm_plane_create_color_properties(
5853 			plane,
5854 			BIT(DRM_COLOR_YCBCR_BT601) |
5855 			BIT(DRM_COLOR_YCBCR_BT709) |
5856 			BIT(DRM_COLOR_YCBCR_BT2020),
5857 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5858 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5859 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5860 	}
5861 
5862 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5863 
5864 	/* Create (reset) the plane state */
5865 	if (plane->funcs->reset)
5866 		plane->funcs->reset(plane);
5867 
5868 	return 0;
5869 }
5870 
5871 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5872 			       struct drm_plane *plane,
5873 			       uint32_t crtc_index)
5874 {
5875 	struct amdgpu_crtc *acrtc = NULL;
5876 	struct drm_plane *cursor_plane;
5877 
5878 	int res = -ENOMEM;
5879 
5880 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5881 	if (!cursor_plane)
5882 		goto fail;
5883 
5884 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5885 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5886 
5887 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5888 	if (!acrtc)
5889 		goto fail;
5890 
5891 	res = drm_crtc_init_with_planes(
5892 			dm->ddev,
5893 			&acrtc->base,
5894 			plane,
5895 			cursor_plane,
5896 			&amdgpu_dm_crtc_funcs, NULL);
5897 
5898 	if (res)
5899 		goto fail;
5900 
5901 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5902 
5903 	/* Create (reset) the crtc state */
5904 	if (acrtc->base.funcs->reset)
5905 		acrtc->base.funcs->reset(&acrtc->base);
5906 
5907 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5908 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5909 
5910 	acrtc->crtc_id = crtc_index;
5911 	acrtc->base.enabled = false;
5912 	acrtc->otg_inst = -1;
5913 
5914 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5915 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5916 				   true, MAX_COLOR_LUT_ENTRIES);
5917 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5918 
5919 	return 0;
5920 
5921 fail:
5922 	kfree(acrtc);
5923 	kfree(cursor_plane);
5924 	return res;
5925 }
5926 
5927 
5928 static int to_drm_connector_type(enum signal_type st)
5929 {
5930 	switch (st) {
5931 	case SIGNAL_TYPE_HDMI_TYPE_A:
5932 		return DRM_MODE_CONNECTOR_HDMIA;
5933 	case SIGNAL_TYPE_EDP:
5934 		return DRM_MODE_CONNECTOR_eDP;
5935 	case SIGNAL_TYPE_LVDS:
5936 		return DRM_MODE_CONNECTOR_LVDS;
5937 	case SIGNAL_TYPE_RGB:
5938 		return DRM_MODE_CONNECTOR_VGA;
5939 	case SIGNAL_TYPE_DISPLAY_PORT:
5940 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5941 		return DRM_MODE_CONNECTOR_DisplayPort;
5942 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5943 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5944 		return DRM_MODE_CONNECTOR_DVID;
5945 	case SIGNAL_TYPE_VIRTUAL:
5946 		return DRM_MODE_CONNECTOR_VIRTUAL;
5947 
5948 	default:
5949 		return DRM_MODE_CONNECTOR_Unknown;
5950 	}
5951 }
5952 
5953 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5954 {
5955 	struct drm_encoder *encoder;
5956 
5957 	/* There is only one encoder per connector */
5958 	drm_connector_for_each_possible_encoder(connector, encoder)
5959 		return encoder;
5960 
5961 	return NULL;
5962 }
5963 
5964 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5965 {
5966 	struct drm_encoder *encoder;
5967 	struct amdgpu_encoder *amdgpu_encoder;
5968 
5969 	encoder = amdgpu_dm_connector_to_encoder(connector);
5970 
5971 	if (encoder == NULL)
5972 		return;
5973 
5974 	amdgpu_encoder = to_amdgpu_encoder(encoder);
5975 
5976 	amdgpu_encoder->native_mode.clock = 0;
5977 
5978 	if (!list_empty(&connector->probed_modes)) {
5979 		struct drm_display_mode *preferred_mode = NULL;
5980 
5981 		list_for_each_entry(preferred_mode,
5982 				    &connector->probed_modes,
5983 				    head) {
5984 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5985 				amdgpu_encoder->native_mode = *preferred_mode;
5986 
5987 			break;
5988 		}
5989 
5990 	}
5991 }
5992 
5993 static struct drm_display_mode *
5994 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5995 			     char *name,
5996 			     int hdisplay, int vdisplay)
5997 {
5998 	struct drm_device *dev = encoder->dev;
5999 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6000 	struct drm_display_mode *mode = NULL;
6001 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6002 
6003 	mode = drm_mode_duplicate(dev, native_mode);
6004 
6005 	if (mode == NULL)
6006 		return NULL;
6007 
6008 	mode->hdisplay = hdisplay;
6009 	mode->vdisplay = vdisplay;
6010 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6011 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6012 
6013 	return mode;
6014 
6015 }
6016 
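/*
 * Add a set of common modes derived from the native mode. Only sizes
 * strictly smaller than the native resolution are added, and only when the
 * EDID did not already provide the same size, so scaled output can offer
 * familiar resolutions on panels that report few modes.
 */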
6017 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6018 						 struct drm_connector *connector)
6019 {
6020 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6021 	struct drm_display_mode *mode = NULL;
6022 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6023 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6024 				to_amdgpu_dm_connector(connector);
6025 	int i;
6026 	int n;
6027 	struct mode_size {
6028 		char name[DRM_DISPLAY_MODE_LEN];
6029 		int w;
6030 		int h;
6031 	} common_modes[] = {
6032 		{  "640x480",  640,  480},
6033 		{  "800x600",  800,  600},
6034 		{ "1024x768", 1024,  768},
6035 		{ "1280x720", 1280,  720},
6036 		{ "1280x800", 1280,  800},
6037 		{"1280x1024", 1280, 1024},
6038 		{ "1440x900", 1440,  900},
6039 		{"1680x1050", 1680, 1050},
6040 		{"1600x1200", 1600, 1200},
6041 		{"1920x1080", 1920, 1080},
6042 		{"1920x1200", 1920, 1200}
6043 	};
6044 
6045 	n = ARRAY_SIZE(common_modes);
6046 
6047 	for (i = 0; i < n; i++) {
6048 		struct drm_display_mode *curmode = NULL;
6049 		bool mode_existed = false;
6050 
6051 		if (common_modes[i].w > native_mode->hdisplay ||
6052 		    common_modes[i].h > native_mode->vdisplay ||
6053 		   (common_modes[i].w == native_mode->hdisplay &&
6054 		    common_modes[i].h == native_mode->vdisplay))
6055 			continue;
6056 
6057 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6058 			if (common_modes[i].w == curmode->hdisplay &&
6059 			    common_modes[i].h == curmode->vdisplay) {
6060 				mode_existed = true;
6061 				break;
6062 			}
6063 		}
6064 
6065 		if (mode_existed)
6066 			continue;
6067 
6068 		mode = amdgpu_dm_create_common_mode(encoder,
6069 				common_modes[i].name, common_modes[i].w,
6070 				common_modes[i].h);
6071 		drm_mode_probed_add(connector, mode);
6072 		amdgpu_dm_connector->num_modes++;
6073 	}
6074 }
6075 
6076 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6077 					      struct edid *edid)
6078 {
6079 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6080 			to_amdgpu_dm_connector(connector);
6081 
6082 	if (edid) {
6083 		/* empty probed_modes */
6084 		INIT_LIST_HEAD(&connector->probed_modes);
6085 		amdgpu_dm_connector->num_modes =
6086 				drm_add_edid_modes(connector, edid);
6087 
6088 		/*
6089 		 * Sort the probed modes before calling
6090 		 * amdgpu_dm_get_native_mode(), since an EDID can have more
6091 		 * than one preferred mode. Modes later in the probed list
6092 		 * may be of higher, preferred resolution: for example,
6093 		 * 3840x2160 in the base EDID preferred timing and 4096x2160
6094 		 * in a later DID extension block.
6095 		 */
6096 		drm_mode_sort(&connector->probed_modes);
6097 		amdgpu_dm_get_native_mode(connector);
6098 	} else {
6099 		amdgpu_dm_connector->num_modes = 0;
6100 	}
6101 }
6102 
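/*
 * .get_modes hook: with a valid EDID, rebuild the probed list from it and
 * append the common modes; without one, fall back to the no-EDID default
 * modes up to 640x480.
 */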
6103 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6104 {
6105 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6106 			to_amdgpu_dm_connector(connector);
6107 	struct drm_encoder *encoder;
6108 	struct edid *edid = amdgpu_dm_connector->edid;
6109 
6110 	encoder = amdgpu_dm_connector_to_encoder(connector);
6111 
6112 	if (!edid || !drm_edid_is_valid(edid)) {
6113 		amdgpu_dm_connector->num_modes =
6114 				drm_add_modes_noedid(connector, 640, 480);
6115 	} else {
6116 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6117 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6118 	}
6119 	amdgpu_dm_fbc_init(connector);
6120 
6121 	return amdgpu_dm_connector->num_modes;
6122 }
6123 
6124 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6125 				     struct amdgpu_dm_connector *aconnector,
6126 				     int connector_type,
6127 				     struct dc_link *link,
6128 				     int link_index)
6129 {
6130 	struct amdgpu_device *adev = dm->ddev->dev_private;
6131 
6132 	/*
6133 	 * Some of the properties below require access to state, like bpc.
6134 	 * Allocate some default initial connector state with our reset helper.
6135 	 */
6136 	if (aconnector->base.funcs->reset)
6137 		aconnector->base.funcs->reset(&aconnector->base);
6138 
6139 	aconnector->connector_id = link_index;
6140 	aconnector->dc_link = link;
6141 	aconnector->base.interlace_allowed = false;
6142 	aconnector->base.doublescan_allowed = false;
6143 	aconnector->base.stereo_allowed = false;
6144 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6145 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6146 	aconnector->audio_inst = -1;
6147 	mutex_init(&aconnector->hpd_lock);
6148 
6149 	/*
6150 	 * Configure HPD hot-plug support. connector->polled defaults to 0,
6151 	 * which means HPD hot plug is not supported.
6152 	 */
6153 	switch (connector_type) {
6154 	case DRM_MODE_CONNECTOR_HDMIA:
6155 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6156 		aconnector->base.ycbcr_420_allowed =
6157 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6158 		break;
6159 	case DRM_MODE_CONNECTOR_DisplayPort:
6160 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6161 		aconnector->base.ycbcr_420_allowed =
6162 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6163 		break;
6164 	case DRM_MODE_CONNECTOR_DVID:
6165 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6166 		break;
6167 	default:
6168 		break;
6169 	}
6170 
6171 	drm_object_attach_property(&aconnector->base.base,
6172 				dm->ddev->mode_config.scaling_mode_property,
6173 				DRM_MODE_SCALE_NONE);
6174 
6175 	drm_object_attach_property(&aconnector->base.base,
6176 				adev->mode_info.underscan_property,
6177 				UNDERSCAN_OFF);
6178 	drm_object_attach_property(&aconnector->base.base,
6179 				adev->mode_info.underscan_hborder_property,
6180 				0);
6181 	drm_object_attach_property(&aconnector->base.base,
6182 				adev->mode_info.underscan_vborder_property,
6183 				0);
6184 
6185 	if (!aconnector->mst_port)
6186 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6187 
6188 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6189 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6190 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6191 
6192 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6193 	    dc_is_dmcu_initialized(adev->dm.dc)) {
6194 		drm_object_attach_property(&aconnector->base.base,
6195 				adev->mode_info.abm_level_property, 0);
6196 	}
6197 
6198 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6199 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6200 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6201 		drm_object_attach_property(
6202 			&aconnector->base.base,
6203 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6204 
6205 		if (!aconnector->mst_port)
6206 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6207 
6208 #ifdef CONFIG_DRM_AMD_DC_HDCP
6209 		if (adev->dm.hdcp_workqueue)
6210 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6211 #endif
6212 	}
6213 }
6214 
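/*
 * i2c_algorithm .master_xfer hook: translate the i2c_msg array into a DC
 * i2c_command, one payload per message with the direction taken from the
 * I2C_M_RD flag, and submit it on the link's DDC channel. Returns the
 * number of messages on success and -EIO otherwise.
 */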
6215 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6216 			      struct i2c_msg *msgs, int num)
6217 {
6218 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6219 	struct ddc_service *ddc_service = i2c->ddc_service;
6220 	struct i2c_command cmd;
6221 	int i;
6222 	int result = -EIO;
6223 
6224 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6225 
6226 	if (!cmd.payloads)
6227 		return result;
6228 
6229 	cmd.number_of_payloads = num;
6230 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6231 	cmd.speed = 100;
6232 
6233 	for (i = 0; i < num; i++) {
6234 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6235 		cmd.payloads[i].address = msgs[i].addr;
6236 		cmd.payloads[i].length = msgs[i].len;
6237 		cmd.payloads[i].data = msgs[i].buf;
6238 	}
6239 
6240 	if (dc_submit_i2c(
6241 			ddc_service->ctx->dc,
6242 			ddc_service->ddc_pin->hw_info.ddc_channel,
6243 			&cmd))
6244 		result = num;
6245 
6246 	kfree(cmd.payloads);
6247 	return result;
6248 }
6249 
6250 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6251 {
6252 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6253 }
6254 
6255 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6256 	.master_xfer = amdgpu_dm_i2c_xfer,
6257 	.functionality = amdgpu_dm_i2c_func,
6258 };
6259 
6260 static struct amdgpu_i2c_adapter *
6261 create_i2c(struct ddc_service *ddc_service,
6262 	   int link_index,
6263 	   int *res)
6264 {
6265 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6266 	struct amdgpu_i2c_adapter *i2c;
6267 
6268 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6269 	if (!i2c)
6270 		return NULL;
6271 	i2c->base.owner = THIS_MODULE;
6272 	i2c->base.class = I2C_CLASS_DDC;
6273 	i2c->base.dev.parent = &adev->pdev->dev;
6274 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6275 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6276 	i2c_set_adapdata(&i2c->base, i2c);
6277 	i2c->ddc_service = ddc_service;
6278 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6279 
6280 	return i2c;
6281 }
6282 
6283 
6284 /*
6285  * Note: this function assumes that dc_link_detect() was called for the
6286  * dc_link which will be represented by this aconnector.
6287  */
6288 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6289 				    struct amdgpu_dm_connector *aconnector,
6290 				    uint32_t link_index,
6291 				    struct amdgpu_encoder *aencoder)
6292 {
6293 	int res = 0;
6294 	int connector_type;
6295 	struct dc *dc = dm->dc;
6296 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6297 	struct amdgpu_i2c_adapter *i2c;
6298 
6299 	link->priv = aconnector;
6300 
6301 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6302 
6303 	i2c = create_i2c(link->ddc, link->link_index, &res);
6304 	if (!i2c) {
6305 		DRM_ERROR("Failed to create i2c adapter data\n");
6306 		return -ENOMEM;
6307 	}
6308 
6309 	aconnector->i2c = i2c;
6310 	res = i2c_add_adapter(&i2c->base);
6311 
6312 	if (res) {
6313 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6314 		goto out_free;
6315 	}
6316 
6317 	connector_type = to_drm_connector_type(link->connector_signal);
6318 
6319 	res = drm_connector_init_with_ddc(
6320 			dm->ddev,
6321 			&aconnector->base,
6322 			&amdgpu_dm_connector_funcs,
6323 			connector_type,
6324 			&i2c->base);
6325 
6326 	if (res) {
6327 		DRM_ERROR("connector_init failed\n");
6328 		aconnector->connector_id = -1;
6329 		goto out_free;
6330 	}
6331 
6332 	drm_connector_helper_add(
6333 			&aconnector->base,
6334 			&amdgpu_dm_connector_helper_funcs);
6335 
6336 	amdgpu_dm_connector_init_helper(
6337 		dm,
6338 		aconnector,
6339 		connector_type,
6340 		link,
6341 		link_index);
6342 
6343 	drm_connector_attach_encoder(
6344 		&aconnector->base, &aencoder->base);
6345 
6346 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6347 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6348 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6349 
6350 out_free:
6351 	if (res) {
6352 		kfree(i2c);
6353 		aconnector->i2c = NULL;
6354 	}
6355 	return res;
6356 }
6357 
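/*
 * Build the possible_crtcs bitmask for an encoder: one bit per CRTC,
 * i.e. (1 << num_crtc) - 1, capped at the 6 CRTCs the hardware exposes
 * (num_crtc == 4 yields 0xf, for instance).
 */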
6358 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6359 {
6360 	switch (adev->mode_info.num_crtc) {
6361 	case 1:
6362 		return 0x1;
6363 	case 2:
6364 		return 0x3;
6365 	case 3:
6366 		return 0x7;
6367 	case 4:
6368 		return 0xf;
6369 	case 5:
6370 		return 0x1f;
6371 	case 6:
6372 	default:
6373 		return 0x3f;
6374 	}
6375 }
6376 
6377 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6378 				  struct amdgpu_encoder *aencoder,
6379 				  uint32_t link_index)
6380 {
6381 	struct amdgpu_device *adev = dev->dev_private;
6382 
6383 	int res = drm_encoder_init(dev,
6384 				   &aencoder->base,
6385 				   &amdgpu_dm_encoder_funcs,
6386 				   DRM_MODE_ENCODER_TMDS,
6387 				   NULL);
6388 
6389 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6390 
6391 	if (!res)
6392 		aencoder->encoder_id = link_index;
6393 	else
6394 		aencoder->encoder_id = -1;
6395 
6396 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6397 
6398 	return res;
6399 }
6400 
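/*
 * Enable or disable the vblank and pageflip interrupts for a single CRTC,
 * used when its stream is added to or removed from the DC state.
 */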
6401 static void manage_dm_interrupts(struct amdgpu_device *adev,
6402 				 struct amdgpu_crtc *acrtc,
6403 				 bool enable)
6404 {
6405 	/*
6406 	 * This is not a correct translation, but it works as long as the
6407 	 * VBLANK irq constant is the same as the PFLIP one.
6408 	 */
6409 	int irq_type =
6410 		amdgpu_display_crtc_idx_to_irq_type(
6411 			adev,
6412 			acrtc->crtc_id);
6413 
6414 	if (enable) {
6415 		drm_crtc_vblank_on(&acrtc->base);
6416 		amdgpu_irq_get(
6417 			adev,
6418 			&adev->pageflip_irq,
6419 			irq_type);
6420 	} else {
6421 
6422 		amdgpu_irq_put(
6423 			adev,
6424 			&adev->pageflip_irq,
6425 			irq_type);
6426 		drm_crtc_vblank_off(&acrtc->base);
6427 	}
6428 }
6429 
6430 static bool
6431 is_scaling_state_different(const struct dm_connector_state *dm_state,
6432 			   const struct dm_connector_state *old_dm_state)
6433 {
6434 	if (dm_state->scaling != old_dm_state->scaling)
6435 		return true;
6436 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6437 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6438 			return true;
6439 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6440 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6441 			return true;
6442 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6443 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6444 		return true;
6445 	return false;
6446 }
6447 
6448 #ifdef CONFIG_DRM_AMD_DC_HDCP
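/*
 * Decide whether the HDCP work queue needs to act on this connector.
 * The DESIRED/ENABLED/UNDESIRED transitions are normalized first:
 * ENABLED -> DESIRED is treated as a re-enable and ignored, while the S3
 * resume UNDESIRED -> ENABLED case is downgraded to DESIRED so HDCP is
 * renegotiated with the sink.
 */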
6449 static bool is_content_protection_different(struct drm_connector_state *state,
6450 					    const struct drm_connector_state *old_state,
6451 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6452 {
6453 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6454 
6455 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6456 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6457 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6458 		return true;
6459 	}
6460 
6461 	/* CP is being re-enabled, ignore this */
6462 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6463 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6464 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6465 		return false;
6466 	}
6467 
6468 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6469 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6470 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6471 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6472 
6473 	/* Check that something is connected and enabled; otherwise we would
6474 	 * start HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6475 	 */
6476 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6477 	    aconnector->dc_sink != NULL)
6478 		return true;
6479 
6480 	if (old_state->content_protection == state->content_protection)
6481 		return false;
6482 
6483 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6484 		return true;
6485 
6486 	return false;
6487 }
6488 
6489 #endif
6490 static void remove_stream(struct amdgpu_device *adev,
6491 			  struct amdgpu_crtc *acrtc,
6492 			  struct dc_stream_state *stream)
6493 {
6494 	/* this is the update mode case */
6495 
6496 	acrtc->otg_inst = -1;
6497 	acrtc->enabled = false;
6498 }
6499 
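/*
 * Translate the cursor plane's CRTC coordinates into a DC cursor position.
 * Negative coordinates (cursor hanging off the top/left edge) are clamped
 * to zero with the overhang folded into the hotspot so the visible part
 * stays put: crtc_x = -10 becomes x = 0, x_hotspot = 10.
 */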
6500 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6501 			       struct dc_cursor_position *position)
6502 {
6503 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6504 	int x, y;
6505 	int xorigin = 0, yorigin = 0;
6506 
6507 	position->enable = false;
6508 	position->x = 0;
6509 	position->y = 0;
6510 
6511 	if (!crtc || !plane->state->fb)
6512 		return 0;
6513 
6514 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6515 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6516 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6517 			  __func__,
6518 			  plane->state->crtc_w,
6519 			  plane->state->crtc_h);
6520 		return -EINVAL;
6521 	}
6522 
6523 	x = plane->state->crtc_x;
6524 	y = plane->state->crtc_y;
6525 
6526 	if (x <= -amdgpu_crtc->max_cursor_width ||
6527 	    y <= -amdgpu_crtc->max_cursor_height)
6528 		return 0;
6529 
6530 	if (x < 0) {
6531 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6532 		x = 0;
6533 	}
6534 	if (y < 0) {
6535 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6536 		y = 0;
6537 	}
6538 	position->enable = true;
6539 	position->translate_by_source = true;
6540 	position->x = x;
6541 	position->y = y;
6542 	position->x_hotspot = xorigin;
6543 	position->y_hotspot = yorigin;
6544 
6545 	return 0;
6546 }
6547 
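/*
 * Program, move or hide the hardware cursor from the cursor plane state.
 * When the cursor is being disabled the old plane state's CRTC is used, so
 * the "cursor off" position still reaches the right stream.
 */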
6548 static void handle_cursor_update(struct drm_plane *plane,
6549 				 struct drm_plane_state *old_plane_state)
6550 {
6551 	struct amdgpu_device *adev = plane->dev->dev_private;
6552 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6553 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6554 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6555 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6556 	uint64_t address = afb ? afb->address : 0;
6557 	struct dc_cursor_position position;
6558 	struct dc_cursor_attributes attributes;
6559 	int ret;
6560 
6561 	if (!plane->state->fb && !old_plane_state->fb)
6562 		return;
6563 
6564 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6565 			 __func__,
6566 			 amdgpu_crtc->crtc_id,
6567 			 plane->state->crtc_w,
6568 			 plane->state->crtc_h);
6569 
6570 	ret = get_cursor_position(plane, crtc, &position);
6571 	if (ret)
6572 		return;
6573 
6574 	if (!position.enable) {
6575 		/* turn off cursor */
6576 		if (crtc_state && crtc_state->stream) {
6577 			mutex_lock(&adev->dm.dc_lock);
6578 			dc_stream_set_cursor_position(crtc_state->stream,
6579 						      &position);
6580 			mutex_unlock(&adev->dm.dc_lock);
6581 		}
6582 		return;
6583 	}
6584 
6585 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6586 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6587 
6588 	memset(&attributes, 0, sizeof(attributes));
6589 	attributes.address.high_part = upper_32_bits(address);
6590 	attributes.address.low_part  = lower_32_bits(address);
6591 	attributes.width             = plane->state->crtc_w;
6592 	attributes.height            = plane->state->crtc_h;
6593 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6594 	attributes.rotation_angle    = 0;
6595 	attributes.attribute_flags.value = 0;
6596 
6597 	attributes.pitch = attributes.width;
6598 
6599 	if (crtc_state->stream) {
6600 		mutex_lock(&adev->dm.dc_lock);
6601 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6602 							 &attributes))
6603 			DRM_ERROR("DC failed to set cursor attributes\n");
6604 
6605 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6606 						   &position))
6607 			DRM_ERROR("DC failed to set cursor position\n");
6608 		mutex_unlock(&adev->dm.dc_lock);
6609 	}
6610 }
6611 
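/*
 * Arm the pageflip completion: stash the pending drm event on the CRTC and
 * mark the flip SUBMITTED so the pflip IRQ handler will deliver the event.
 * The caller must hold the event_lock, as asserted below.
 */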
6612 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6613 {
6614 
6615 	assert_spin_locked(&acrtc->base.dev->event_lock);
6616 	WARN_ON(acrtc->event);
6617 
6618 	acrtc->event = acrtc->base.state->event;
6619 
6620 	/* Set the flip status */
6621 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6622 
6623 	/* Mark this event as consumed */
6624 	acrtc->base.state->event = NULL;
6625 
6626 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6627 						 acrtc->crtc_id);
6628 }
6629 
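/*
 * Flip-time VRR bookkeeping: feed the flip timestamp to the freesync
 * module, rebuild the VRR infopacket, and record whether the timing or
 * infopacket changed so the commit pushes a stream update. On families
 * older than FAMILY_AI the vmin/vmax adjustment must land before the
 * current frame ends, hence dc_stream_adjust_vmin_vmax() is called here
 * under the event lock.
 */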
6630 static void update_freesync_state_on_stream(
6631 	struct amdgpu_display_manager *dm,
6632 	struct dm_crtc_state *new_crtc_state,
6633 	struct dc_stream_state *new_stream,
6634 	struct dc_plane_state *surface,
6635 	u32 flip_timestamp_in_us)
6636 {
6637 	struct mod_vrr_params vrr_params;
6638 	struct dc_info_packet vrr_infopacket = {0};
6639 	struct amdgpu_device *adev = dm->adev;
6640 	unsigned long flags;
6641 
6642 	if (!new_stream)
6643 		return;
6644 
6645 	/*
6646 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6647 	 * For now it's sufficient to just guard against these conditions.
6648 	 */
6649 
6650 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6651 		return;
6652 
6653 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6654 	vrr_params = new_crtc_state->vrr_params;
6655 
6656 	if (surface) {
6657 		mod_freesync_handle_preflip(
6658 			dm->freesync_module,
6659 			surface,
6660 			new_stream,
6661 			flip_timestamp_in_us,
6662 			&vrr_params);
6663 
6664 		if (adev->family < AMDGPU_FAMILY_AI &&
6665 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6666 			mod_freesync_handle_v_update(dm->freesync_module,
6667 						     new_stream, &vrr_params);
6668 
6669 			/* Need to call this before the frame ends. */
6670 			dc_stream_adjust_vmin_vmax(dm->dc,
6671 						   new_crtc_state->stream,
6672 						   &vrr_params.adjust);
6673 		}
6674 	}
6675 
6676 	mod_freesync_build_vrr_infopacket(
6677 		dm->freesync_module,
6678 		new_stream,
6679 		&vrr_params,
6680 		PACKET_TYPE_VRR,
6681 		TRANSFER_FUNC_UNKNOWN,
6682 		&vrr_infopacket);
6683 
6684 	new_crtc_state->freesync_timing_changed |=
6685 		(memcmp(&new_crtc_state->vrr_params.adjust,
6686 			&vrr_params.adjust,
6687 			sizeof(vrr_params.adjust)) != 0);
6688 
6689 	new_crtc_state->freesync_vrr_info_changed |=
6690 		(memcmp(&new_crtc_state->vrr_infopacket,
6691 			&vrr_infopacket,
6692 			sizeof(vrr_infopacket)) != 0);
6693 
6694 	new_crtc_state->vrr_params = vrr_params;
6695 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6696 
6697 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6698 	new_stream->vrr_infopacket = vrr_infopacket;
6699 
6700 	if (new_crtc_state->freesync_vrr_info_changed)
6701 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
6702 			      new_crtc_state->base.crtc->base.id,
6703 			      (int)new_crtc_state->base.vrr_enabled,
6704 			      (int)vrr_params.state);
6705 
6706 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6707 }
6708 
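/*
 * Plane-commit-time counterpart of the above: derive the VRR state
 * (ACTIVE_VARIABLE, INACTIVE or UNSUPPORTED) from the new CRTC state and
 * rebuild vrr_params before any flips are programmed.
 */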
6709 static void pre_update_freesync_state_on_stream(
6710 	struct amdgpu_display_manager *dm,
6711 	struct dm_crtc_state *new_crtc_state)
6712 {
6713 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6714 	struct mod_vrr_params vrr_params;
6715 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6716 	struct amdgpu_device *adev = dm->adev;
6717 	unsigned long flags;
6718 
6719 	if (!new_stream)
6720 		return;
6721 
6722 	/*
6723 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6724 	 * For now it's sufficient to just guard against these conditions.
6725 	 */
6726 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6727 		return;
6728 
6729 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6730 	vrr_params = new_crtc_state->vrr_params;
6731 
6732 	if (new_crtc_state->vrr_supported &&
6733 	    config.min_refresh_in_uhz &&
6734 	    config.max_refresh_in_uhz) {
6735 		config.state = new_crtc_state->base.vrr_enabled ?
6736 			VRR_STATE_ACTIVE_VARIABLE :
6737 			VRR_STATE_INACTIVE;
6738 	} else {
6739 		config.state = VRR_STATE_UNSUPPORTED;
6740 	}
6741 
6742 	mod_freesync_build_vrr_params(dm->freesync_module,
6743 				      new_stream,
6744 				      &config, &vrr_params);
6745 
6746 	new_crtc_state->freesync_timing_changed |=
6747 		(memcmp(&new_crtc_state->vrr_params.adjust,
6748 			&vrr_params.adjust,
6749 			sizeof(vrr_params.adjust)) != 0);
6750 
6751 	new_crtc_state->vrr_params = vrr_params;
6752 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6753 }
6754 
6755 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6756 					    struct dm_crtc_state *new_state)
6757 {
6758 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6759 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6760 
6761 	if (!old_vrr_active && new_vrr_active) {
6762 		/* Transition VRR inactive -> active:
6763 		 * While VRR is active, we must not disable vblank irq, as a
6764 		 * re-enable after a disable would compute bogus vblank/pflip
6765 		 * timestamps if it happened inside the display front porch.
6766 		 *
6767 		 * We also need vupdate irq for the actual core vblank handling
6768 		 * at end of vblank.
6769 		 */
6770 		dm_set_vupdate_irq(new_state->base.crtc, true);
6771 		drm_crtc_vblank_get(new_state->base.crtc);
6772 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6773 				 __func__, new_state->base.crtc->base.id);
6774 	} else if (old_vrr_active && !new_vrr_active) {
6775 		/* Transition VRR active -> inactive:
6776 		 * Allow vblank irq disable again for fixed refresh rate.
6777 		 */
6778 		dm_set_vupdate_irq(new_state->base.crtc, false);
6779 		drm_crtc_vblank_put(new_state->base.crtc);
6780 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6781 				 __func__, new_state->base.crtc->base.id);
6782 	}
6783 }
6784 
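/*
 * Issue a cursor update for every cursor plane in the commit; used below
 * to park the cursor before all hardware planes are disabled, since it
 * would otherwise linger on screen once the planes come back.
 */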
6785 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6786 {
6787 	struct drm_plane *plane;
6788 	struct drm_plane_state *old_plane_state, *new_plane_state;
6789 	int i;
6790 
6791 	/*
6792 	 * TODO: Make this per-stream so we don't issue redundant updates for
6793 	 * commits with multiple streams.
6794 	 */
6795 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6796 				       new_plane_state, i)
6797 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6798 			handle_cursor_update(plane, old_plane_state);
6799 }
6800 
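/*
 * Build a single dc_surface_update bundle for all planes on this CRTC and
 * hand it to DC in one commit: gather scaling and plane info plus flip
 * addresses, throttle against the target vblank, and arm the pageflip
 * event before the flips are programmed.
 */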
6801 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6802 				    struct dc_state *dc_state,
6803 				    struct drm_device *dev,
6804 				    struct amdgpu_display_manager *dm,
6805 				    struct drm_crtc *pcrtc,
6806 				    bool wait_for_vblank)
6807 {
6808 	uint32_t i;
6809 	uint64_t timestamp_ns;
6810 	struct drm_plane *plane;
6811 	struct drm_plane_state *old_plane_state, *new_plane_state;
6812 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6813 	struct drm_crtc_state *new_pcrtc_state =
6814 			drm_atomic_get_new_crtc_state(state, pcrtc);
6815 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6816 	struct dm_crtc_state *dm_old_crtc_state =
6817 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6818 	int planes_count = 0, vpos, hpos;
6819 	long r;
6820 	unsigned long flags;
6821 	struct amdgpu_bo *abo;
6822 	uint64_t tiling_flags;
6823 	bool tmz_surface = false;
6824 	uint32_t target_vblank, last_flip_vblank;
6825 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6826 	bool pflip_present = false;
6827 	struct {
6828 		struct dc_surface_update surface_updates[MAX_SURFACES];
6829 		struct dc_plane_info plane_infos[MAX_SURFACES];
6830 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6831 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6832 		struct dc_stream_update stream_update;
6833 	} *bundle;
6834 
6835 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6836 
6837 	if (!bundle) {
6838 		dm_error("Failed to allocate update bundle\n");
6839 		goto cleanup;
6840 	}
6841 
6842 	/*
6843 	 * Disable the cursor first if we're disabling all the planes.
6844 	 * It'll remain on the screen after the planes are re-enabled
6845 	 * if we don't.
6846 	 */
6847 	if (acrtc_state->active_planes == 0)
6848 		amdgpu_dm_commit_cursors(state);
6849 
6850 	/* update planes when needed */
6851 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6852 		struct drm_crtc *crtc = new_plane_state->crtc;
6853 		struct drm_crtc_state *new_crtc_state;
6854 		struct drm_framebuffer *fb = new_plane_state->fb;
6855 		bool plane_needs_flip;
6856 		struct dc_plane_state *dc_plane;
6857 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6858 
6859 		/* Cursor plane is handled after stream updates */
6860 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6861 			continue;
6862 
6863 		if (!fb || !crtc || pcrtc != crtc)
6864 			continue;
6865 
6866 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6867 		if (!new_crtc_state->active)
6868 			continue;
6869 
6870 		dc_plane = dm_new_plane_state->dc_state;
6871 
6872 		bundle->surface_updates[planes_count].surface = dc_plane;
6873 		if (new_pcrtc_state->color_mgmt_changed) {
6874 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6875 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6876 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6877 		}
6878 
6879 		fill_dc_scaling_info(new_plane_state,
6880 				     &bundle->scaling_infos[planes_count]);
6881 
6882 		bundle->surface_updates[planes_count].scaling_info =
6883 			&bundle->scaling_infos[planes_count];
6884 
6885 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6886 
6887 		pflip_present = pflip_present || plane_needs_flip;
6888 
6889 		if (!plane_needs_flip) {
6890 			planes_count += 1;
6891 			continue;
6892 		}
6893 
6894 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6895 
6896 		/*
6897 		 * Wait for all fences on this FB. Do limited wait to avoid
6898 		 * deadlock during GPU reset when this fence will not signal
6899 		 * but we hold reservation lock for the BO.
6900 		 */
6901 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6902 							false,
6903 							msecs_to_jiffies(5000));
6904 		if (unlikely(r <= 0))
6905 			DRM_ERROR("Waiting for fences timed out!\n");
6906 
6907 		/*
6908 		 * TODO: This might fail and is hence better not used; wait
6909 		 * explicitly on the fences instead. In general this should
6910 		 * only be called from a blocking commit, as per the
6911 		 * framework helpers.
6912 		 */
6913 		r = amdgpu_bo_reserve(abo, true);
6914 		if (unlikely(r != 0))
6915 			DRM_ERROR("failed to reserve buffer before flip\n");
6916 
6917 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6918 
6919 		tmz_surface = amdgpu_bo_encrypted(abo);
6920 
6921 		amdgpu_bo_unreserve(abo);
6922 
6923 		fill_dc_plane_info_and_addr(
6924 			dm->adev, new_plane_state, tiling_flags,
6925 			&bundle->plane_infos[planes_count],
6926 			&bundle->flip_addrs[planes_count].address,
6927 			tmz_surface,
6928 			false);
6929 
6930 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6931 				 new_plane_state->plane->index,
6932 				 bundle->plane_infos[planes_count].dcc.enable);
6933 
6934 		bundle->surface_updates[planes_count].plane_info =
6935 			&bundle->plane_infos[planes_count];
6936 
6937 		/*
6938 		 * Only allow immediate flips for fast updates that don't
6939 		 * change FB pitch, DCC state, rotation or mirroring.
6940 		 */
6941 		bundle->flip_addrs[planes_count].flip_immediate =
6942 			crtc->state->async_flip &&
6943 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6944 
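		/* DC consumes the flip timestamp in microseconds. */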
6945 		timestamp_ns = ktime_get_ns();
6946 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6947 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6948 		bundle->surface_updates[planes_count].surface = dc_plane;
6949 
6950 		if (!bundle->surface_updates[planes_count].surface) {
6951 			DRM_ERROR("No surface for CRTC: id=%d\n",
6952 					acrtc_attach->crtc_id);
6953 			continue;
6954 		}
6955 
6956 		if (plane == pcrtc->primary)
6957 			update_freesync_state_on_stream(
6958 				dm,
6959 				acrtc_state,
6960 				acrtc_state->stream,
6961 				dc_plane,
6962 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6963 
6964 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6965 				 __func__,
6966 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6967 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6968 
6969 		planes_count += 1;
6970 
6971 	}
6972 
6973 	if (pflip_present) {
6974 		if (!vrr_active) {
6975 			/* Use old throttling in non-vrr fixed refresh rate mode
6976 			 * to keep flip scheduling based on target vblank counts
6977 			 * working in a backwards compatible way, e.g., for
6978 			 * clients using the GLX_OML_sync_control extension or
6979 			 * DRI3/Present extension with defined target_msc.
6980 			 */
6981 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6982 		}
6983 		else {
6984 			/* For variable refresh rate mode only:
6985 			 * Get vblank of last completed flip to avoid > 1 vrr
6986 			 * flips per video frame by use of throttling, but allow
6987 			 * flip programming anywhere in the possibly large
6988 			 * variable vrr vblank interval for fine-grained flip
6989 			 * timing control and more opportunity to avoid stutter
6990 			 * on late submission of flips.
6991 			 */
6992 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6993 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6994 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6995 		}
6996 
6997 		target_vblank = last_flip_vblank + wait_for_vblank;
6998 
6999 		/*
7000 		 * Wait until we're out of the vertical blank period before the one
7001 		 * targeted by the flip
7002 		 */
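		/*
		 * The signed cast below keeps the comparison correct even when
		 * the 32-bit vblank counter wraps around.
		 */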
7003 		while ((acrtc_attach->enabled &&
7004 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7005 							    0, &vpos, &hpos, NULL,
7006 							    NULL, &pcrtc->hwmode)
7007 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7008 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7009 			(int)(target_vblank -
7010 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7011 			usleep_range(1000, 1100);
7012 		}
7013 
7014 		if (acrtc_attach->base.state->event) {
7015 			drm_crtc_vblank_get(pcrtc);
7016 
7017 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7018 
7019 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7020 			prepare_flip_isr(acrtc_attach);
7021 
7022 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7023 		}
7024 
7025 		if (acrtc_state->stream) {
7026 			if (acrtc_state->freesync_vrr_info_changed)
7027 				bundle->stream_update.vrr_infopacket =
7028 					&acrtc_state->stream->vrr_infopacket;
7029 		}
7030 	}
7031 
7032 	/* Update the planes if changed or disable if we don't have any. */
7033 	if ((planes_count || acrtc_state->active_planes == 0) &&
7034 		acrtc_state->stream) {
7035 		bundle->stream_update.stream = acrtc_state->stream;
7036 		if (new_pcrtc_state->mode_changed) {
7037 			bundle->stream_update.src = acrtc_state->stream->src;
7038 			bundle->stream_update.dst = acrtc_state->stream->dst;
7039 		}
7040 
7041 		if (new_pcrtc_state->color_mgmt_changed) {
7042 			/*
7043 			 * TODO: This isn't fully correct since we've actually
7044 			 * already modified the stream in place.
7045 			 */
7046 			bundle->stream_update.gamut_remap =
7047 				&acrtc_state->stream->gamut_remap_matrix;
7048 			bundle->stream_update.output_csc_transform =
7049 				&acrtc_state->stream->csc_color_matrix;
7050 			bundle->stream_update.out_transfer_func =
7051 				acrtc_state->stream->out_transfer_func;
7052 		}
7053 
7054 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7055 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7056 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7057 
7058 		/*
7059 		 * If FreeSync state on the stream has changed then we need to
7060 		 * re-adjust the min/max bounds now that DC doesn't handle this
7061 		 * as part of commit.
7062 		 */
7063 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7064 		    amdgpu_dm_vrr_active(acrtc_state)) {
7065 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7066 			dc_stream_adjust_vmin_vmax(
7067 				dm->dc, acrtc_state->stream,
7068 				&acrtc_state->vrr_params.adjust);
7069 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7070 		}
7071 		mutex_lock(&dm->dc_lock);
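		/*
		 * PSR cannot remain active across a non-fast update, so disable
		 * it before programming; it is set up or re-enabled again below
		 * once the stream update has been committed.
		 */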
7072 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7073 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7074 			amdgpu_dm_psr_disable(acrtc_state->stream);
7075 
7076 		dc_commit_updates_for_stream(dm->dc,
7077 						     bundle->surface_updates,
7078 						     planes_count,
7079 						     acrtc_state->stream,
7080 						     &bundle->stream_update,
7081 						     dc_state);
7082 
7083 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7084 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7085 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7086 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7087 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7088 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7089 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7090 			amdgpu_dm_psr_enable(acrtc_state->stream);
7091 		}
7092 
7093 		mutex_unlock(&dm->dc_lock);
7094 	}
7095 
7096 	/*
7097 	 * Update cursor state *after* programming all the planes.
7098 	 * This avoids redundant programming in the case where we're going
7099 	 * to be disabling a single plane - those pipes are being disabled.
7100 	 */
7101 	if (acrtc_state->active_planes)
7102 		amdgpu_dm_commit_cursors(state);
7103 
7104 cleanup:
7105 	kfree(bundle);
7106 }
7107 
7108 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7109 				   struct drm_atomic_state *state)
7110 {
7111 	struct amdgpu_device *adev = dev->dev_private;
7112 	struct amdgpu_dm_connector *aconnector;
7113 	struct drm_connector *connector;
7114 	struct drm_connector_state *old_con_state, *new_con_state;
7115 	struct drm_crtc_state *new_crtc_state;
7116 	struct dm_crtc_state *new_dm_crtc_state;
7117 	const struct dc_stream_status *status;
7118 	int i, inst;
7119 
7120 	/* Notify audio device removals. */
7121 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7122 		if (old_con_state->crtc != new_con_state->crtc) {
7123 			/* CRTC changes require notification. */
7124 			goto notify;
7125 		}
7126 
7127 		if (!new_con_state->crtc)
7128 			continue;
7129 
7130 		new_crtc_state = drm_atomic_get_new_crtc_state(
7131 			state, new_con_state->crtc);
7132 
7133 		if (!new_crtc_state)
7134 			continue;
7135 
7136 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7137 			continue;
7138 
7139 	notify:
7140 		aconnector = to_amdgpu_dm_connector(connector);
7141 
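		/* Snapshot and clear the instance under the lock; notify after dropping it. */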
7142 		mutex_lock(&adev->dm.audio_lock);
7143 		inst = aconnector->audio_inst;
7144 		aconnector->audio_inst = -1;
7145 		mutex_unlock(&adev->dm.audio_lock);
7146 
7147 		amdgpu_dm_audio_eld_notify(adev, inst);
7148 	}
7149 
7150 	/* Notify audio device additions. */
7151 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7152 		if (!new_con_state->crtc)
7153 			continue;
7154 
7155 		new_crtc_state = drm_atomic_get_new_crtc_state(
7156 			state, new_con_state->crtc);
7157 
7158 		if (!new_crtc_state)
7159 			continue;
7160 
7161 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7162 			continue;
7163 
7164 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7165 		if (!new_dm_crtc_state->stream)
7166 			continue;
7167 
7168 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7169 		if (!status)
7170 			continue;
7171 
7172 		aconnector = to_amdgpu_dm_connector(connector);
7173 
7174 		mutex_lock(&adev->dm.audio_lock);
7175 		inst = status->audio_inst;
7176 		aconnector->audio_inst = inst;
7177 		mutex_unlock(&adev->dm.audio_lock);
7178 
7179 		amdgpu_dm_audio_eld_notify(adev, inst);
7180 	}
7181 }
7182 
7183 /*
7184  * Enable interrupts on CRTCs that are newly active, have undergone
7185  * a modeset, or have active planes again.
7186  *
7187  * Done in two passes, based on the for_modeset flag:
7188  * Pass 1: For CRTCs going through modeset
7189  * Pass 2: For CRTCs going from 0 to n active planes
7190  *
7191  * Interrupts can only be enabled after the planes are programmed,
7192  * so this requires a two-pass approach since we don't want to
7193  * just defer the interrupts until after commit planes every time.
7194  */
7195 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
7196 					     struct drm_atomic_state *state,
7197 					     bool for_modeset)
7198 {
7199 	struct amdgpu_device *adev = dev->dev_private;
7200 	struct drm_crtc *crtc;
7201 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7202 	int i;
7203 #ifdef CONFIG_DEBUG_FS
7204 	enum amdgpu_dm_pipe_crc_source source;
7205 #endif
7206 
7207 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7208 				      new_crtc_state, i) {
7209 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7210 		struct dm_crtc_state *dm_new_crtc_state =
7211 			to_dm_crtc_state(new_crtc_state);
7212 		struct dm_crtc_state *dm_old_crtc_state =
7213 			to_dm_crtc_state(old_crtc_state);
7214 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7215 		bool run_pass;
7216 
7217 		run_pass = (for_modeset && modeset) ||
7218 			   (!for_modeset && !modeset &&
7219 			    !dm_old_crtc_state->interrupts_enabled);
7220 
7221 		if (!run_pass)
7222 			continue;
7223 
7224 		if (!dm_new_crtc_state->interrupts_enabled)
7225 			continue;
7226 
7227 		manage_dm_interrupts(adev, acrtc, true);
7228 
7229 #ifdef CONFIG_DEBUG_FS
7230 		/* The stream has changed so CRC capture needs to be re-enabled. */
7231 		source = dm_new_crtc_state->crc_src;
7232 		if (amdgpu_dm_is_valid_crc_source(source)) {
7233 			amdgpu_dm_crtc_configure_crc_source(
7234 				crtc, dm_new_crtc_state,
7235 				dm_new_crtc_state->crc_src);
7236 		}
7237 #endif
7238 	}
7239 }
7240 
7241 /*
7242  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7243  * @crtc_state: the DRM CRTC state
7244  * @stream_state: the DC stream state.
7245  *
7246  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7247  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7248  */
7249 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7250 						struct dc_stream_state *stream_state)
7251 {
7252 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7253 }
7254 
7255 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7256 				   struct drm_atomic_state *state,
7257 				   bool nonblock)
7258 {
7259 	struct drm_crtc *crtc;
7260 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7261 	struct amdgpu_device *adev = dev->dev_private;
7262 	int i;
7263 
7264 	/*
7265 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7266 	 * a modeset, being disabled, or have no active planes.
7267 	 *
7268 	 * It's done in atomic commit rather than commit tail for now since
7269 	 * some of these interrupt handlers access the current CRTC state and
7270 	 * potentially the stream pointer itself.
7271 	 *
7272 	 * Since the atomic state is swapped within atomic commit and not within
7273 	 * commit tail, this would lead to the new state (which hasn't been
7274 	 * committed yet) being accessed from within the handlers.
7275 	 *
7276 	 * TODO: Fix this so we can do this in commit tail and not have to block
7277 	 * in atomic check.
7278 	 */
7279 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7280 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7281 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7282 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7283 
7284 		if (dm_old_crtc_state->interrupts_enabled &&
7285 		    (!dm_new_crtc_state->interrupts_enabled ||
7286 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7287 			manage_dm_interrupts(adev, acrtc, false);
7288 	}
7289 	/*
7290 	 * Add check here for SoCs that support hardware cursor plane, to
7291 	 * unset legacy_cursor_update
7292 	 */
7293 
7294 	return drm_atomic_helper_commit(dev, state, nonblock);
7295 
7296 	/* TODO: Handle EINTR, re-enable IRQ */
7297 }
7298 
7299 /**
7300  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7301  * @state: The atomic state to commit
7302  *
7303  * This will tell DC to commit the constructed DC state from atomic_check,
7304  * programming the hardware. Any failure here implies a hardware failure, since
7305  * atomic check should have filtered anything non-kosher.
7306  */
7307 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7308 {
7309 	struct drm_device *dev = state->dev;
7310 	struct amdgpu_device *adev = dev->dev_private;
7311 	struct amdgpu_display_manager *dm = &adev->dm;
7312 	struct dm_atomic_state *dm_state;
7313 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7314 	uint32_t i, j;
7315 	struct drm_crtc *crtc;
7316 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7317 	unsigned long flags;
7318 	bool wait_for_vblank = true;
7319 	struct drm_connector *connector;
7320 	struct drm_connector_state *old_con_state, *new_con_state;
7321 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7322 	int crtc_disable_count = 0;
7323 
7324 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7325 
7326 	dm_state = dm_atomic_get_new_state(state);
7327 	if (dm_state && dm_state->context) {
7328 		dc_state = dm_state->context;
7329 	} else {
7330 		/* No state changes, retain current state. */
7331 		dc_state_temp = dc_create_state(dm->dc);
7332 		ASSERT(dc_state_temp);
7333 		dc_state = dc_state_temp;
7334 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7335 	}
7336 
7337 	/* update changed items */
7338 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7339 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7340 
7341 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7342 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7343 
7344 		DRM_DEBUG_DRIVER(
7345 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7346 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7347 			"connectors_changed:%d\n",
7348 			acrtc->crtc_id,
7349 			new_crtc_state->enable,
7350 			new_crtc_state->active,
7351 			new_crtc_state->planes_changed,
7352 			new_crtc_state->mode_changed,
7353 			new_crtc_state->active_changed,
7354 			new_crtc_state->connectors_changed);
7355 
7356 		/* Copy all transient state flags into dc state */
7357 		if (dm_new_crtc_state->stream) {
7358 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7359 							    dm_new_crtc_state->stream);
7360 		}
7361 
7362 		/* handles headless hotplug case, updating new_state and
7363 		 * aconnector as needed
7364 		 */
7365 
7366 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7367 
7368 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7369 
7370 			if (!dm_new_crtc_state->stream) {
7371 				/*
7372 				 * This could happen because of issues with the
7373 				 * delivery of userspace notifications: userspace
7374 				 * tries to set a mode on a display which is in
7375 				 * fact disconnected, so dc_sink is NULL on the
7376 				 * aconnector.
7377 				 * We expect a mode reset to come soon.
7378 				 *
7379 				 * This can also happen when an unplug occurs
7380 				 * during the resume sequence.
7381 				 *
7382 				 * In either case, we want to pretend we still
7383 				 * have a sink to keep the pipe running so that
7384 				 * hw state is consistent with the sw state.
7385 				 */
7386 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7387 						__func__, acrtc->base.base.id);
7388 				continue;
7389 			}
7390 
7391 			if (dm_old_crtc_state->stream)
7392 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7393 
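			/*
			 * Keep a runtime PM reference while the CRTC drives a
			 * display; it is dropped in commit tail once the CRTC
			 * is disabled again.
			 */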
7394 			pm_runtime_get_noresume(dev->dev);
7395 
7396 			acrtc->enabled = true;
7397 			acrtc->hw_mode = new_crtc_state->mode;
7398 			crtc->hwmode = new_crtc_state->mode;
7399 		} else if (modereset_required(new_crtc_state)) {
7400 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7401 			/* i.e. reset mode */
7402 			if (dm_old_crtc_state->stream) {
7403 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7404 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7405 
7406 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7407 			}
7408 		}
7409 	} /* for_each_crtc_in_state() */
7410 
7411 	if (dc_state) {
7412 		dm_enable_per_frame_crtc_master_sync(dc_state);
7413 		mutex_lock(&dm->dc_lock);
7414 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7415 		mutex_unlock(&dm->dc_lock);
7416 	}
7417 
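	/* Cache the OTG (output timing generator) instance DC assigned to each stream. */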
7418 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7419 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7420 
7421 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7422 
7423 		if (dm_new_crtc_state->stream != NULL) {
7424 			const struct dc_stream_status *status =
7425 					dc_stream_get_status(dm_new_crtc_state->stream);
7426 
7427 			if (!status)
7428 				status = dc_stream_get_status_from_state(dc_state,
7429 									 dm_new_crtc_state->stream);
7430 
7431 			if (!status)
7432 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7433 			else
7434 				acrtc->otg_inst = status->primary_otg_inst;
7435 		}
7436 	}
7437 #ifdef CONFIG_DRM_AMD_DC_HDCP
7438 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7439 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7440 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7441 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7442 
7443 		new_crtc_state = NULL;
7444 
7445 		if (acrtc)
7446 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7447 
7448 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7449 
7450 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7451 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7452 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7453 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7454 			continue;
7455 		}
7456 
7457 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7458 			hdcp_update_display(
7459 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7460 				new_con_state->hdcp_content_type,
7461 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7462 													 : false);
7463 	}
7464 #endif
7465 
7466 	/* Handle connector state changes */
7467 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7468 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7469 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7470 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7471 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7472 		struct dc_stream_update stream_update;
7473 		struct dc_info_packet hdr_packet;
7474 		struct dc_stream_status *status = NULL;
7475 		bool abm_changed, hdr_changed, scaling_changed;
7476 
7477 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7478 		memset(&stream_update, 0, sizeof(stream_update));
7479 
7480 		if (acrtc) {
7481 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7482 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7483 		}
7484 
7485 		/* Skip any modesets/resets */
7486 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7487 			continue;
7488 
7489 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7490 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7491 
7492 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7493 							     dm_old_con_state);
7494 
7495 		abm_changed = dm_new_crtc_state->abm_level !=
7496 			      dm_old_crtc_state->abm_level;
7497 
7498 		hdr_changed =
7499 			is_hdr_metadata_different(old_con_state, new_con_state);
7500 
7501 		if (!scaling_changed && !abm_changed && !hdr_changed)
7502 			continue;
7503 
7504 		stream_update.stream = dm_new_crtc_state->stream;
7505 		if (scaling_changed) {
7506 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7507 					dm_new_con_state, dm_new_crtc_state->stream);
7508 
7509 			stream_update.src = dm_new_crtc_state->stream->src;
7510 			stream_update.dst = dm_new_crtc_state->stream->dst;
7511 		}
7512 
7513 		if (abm_changed) {
7514 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7515 
7516 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7517 		}
7518 
7519 		if (hdr_changed) {
7520 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7521 			stream_update.hdr_static_metadata = &hdr_packet;
7522 		}
7523 
7524 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7525 		WARN_ON(!status);
7526 		WARN_ON(!status->plane_count);
7527 
7528 		/*
7529 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7530 		 * Here we create an empty update on each plane.
7531 		 * To fix this, DC should permit updating only stream properties.
7532 		 */
7533 		for (j = 0; j < status->plane_count; j++)
7534 			dummy_updates[j].surface = status->plane_states[0];
7535 
7536 
7537 		mutex_lock(&dm->dc_lock);
7538 		dc_commit_updates_for_stream(dm->dc,
7539 						     dummy_updates,
7540 						     status->plane_count,
7541 						     dm_new_crtc_state->stream,
7542 						     &stream_update,
7543 						     dc_state);
7544 		mutex_unlock(&dm->dc_lock);
7545 	}
7546 
7547 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7548 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7549 				      new_crtc_state, i) {
7550 		if (old_crtc_state->active && !new_crtc_state->active)
7551 			crtc_disable_count++;
7552 
7553 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7554 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7555 
7556 		/* Update freesync active state. */
7557 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7558 
7559 		/* Handle vrr on->off / off->on transitions */
7560 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7561 						dm_new_crtc_state);
7562 	}
7563 
7564 	/* Enable interrupts for CRTCs going through a modeset. */
7565 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7566 
7567 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7568 		if (new_crtc_state->async_flip)
7569 			wait_for_vblank = false;
7570 
7571 	/* update planes when needed per crtc */
7572 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7573 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7574 
7575 		if (dm_new_crtc_state->stream)
7576 			amdgpu_dm_commit_planes(state, dc_state, dev,
7577 						dm, crtc, wait_for_vblank);
7578 	}
7579 
7580 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7581 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7582 
7583 	/* Update audio instances for each connector. */
7584 	amdgpu_dm_commit_audio(dev, state);
7585 
7586 	/*
7587 	 * send vblank event on all events not handled in flip and
7588 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7589 	 */
7590 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7591 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7592 
7593 		if (new_crtc_state->event)
7594 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7595 
7596 		new_crtc_state->event = NULL;
7597 	}
7598 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7599 
7600 	/* Signal HW programming completion */
7601 	drm_atomic_helper_commit_hw_done(state);
7602 
7603 	if (wait_for_vblank)
7604 		drm_atomic_helper_wait_for_flip_done(dev, state);
7605 
7606 	drm_atomic_helper_cleanup_planes(dev, state);
7607 
7608 	/*
7609 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7610 	 * so we can put the GPU into runtime suspend if we're not driving any
7611 	 * displays anymore
7612 	 */
7613 	for (i = 0; i < crtc_disable_count; i++)
7614 		pm_runtime_put_autosuspend(dev->dev);
7615 	pm_runtime_mark_last_busy(dev->dev);
7616 
7617 	if (dc_state_temp)
7618 		dc_release_state(dc_state_temp);
7619 }
7620 
7621 
7622 static int dm_force_atomic_commit(struct drm_connector *connector)
7623 {
7624 	int ret = 0;
7625 	struct drm_device *ddev = connector->dev;
7626 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7627 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7628 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7629 	struct drm_connector_state *conn_state;
7630 	struct drm_crtc_state *crtc_state;
7631 	struct drm_plane_state *plane_state;
7632 
7633 	if (!state)
7634 		return -ENOMEM;
7635 
7636 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7637 
7638 	/* Construct an atomic state to restore previous display setting */
7639 
7640 	/*
7641 	 * Attach connectors to drm_atomic_state
7642 	 */
7643 	conn_state = drm_atomic_get_connector_state(state, connector);
7644 
7645 	ret = PTR_ERR_OR_ZERO(conn_state);
7646 	if (ret)
7647 		goto err;
7648 
7649 	/* Attach CRTC to drm_atomic_state */
7650 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7651 
7652 	ret = PTR_ERR_OR_ZERO(crtc_state);
7653 	if (ret)
7654 		goto err;
7655 
7656 	/* force a restore */
7657 	crtc_state->mode_changed = true;
7658 
7659 	/* Attach plane to drm_atomic_state */
7660 	plane_state = drm_atomic_get_plane_state(state, plane);
7661 
7662 	ret = PTR_ERR_OR_ZERO(plane_state);
7663 	if (ret)
7664 		goto err;
7665 
7666 
7667 	/* Call commit internally with the state we just constructed */
7668 	ret = drm_atomic_commit(state);
7669 	if (!ret)
7670 		return 0;
7671 
7672 err:
7673 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7674 	drm_atomic_state_put(state);
7675 
7676 	return ret;
7677 }
7678 
7679 /*
7680  * This function handles all cases when a set mode does not come upon hotplug.
7681  * This includes when a display is unplugged and then plugged back into the
7682  * same port, and when running without usermode desktop manager support.
7683  */
7684 void dm_restore_drm_connector_state(struct drm_device *dev,
7685 				    struct drm_connector *connector)
7686 {
7687 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7688 	struct amdgpu_crtc *disconnected_acrtc;
7689 	struct dm_crtc_state *acrtc_state;
7690 
7691 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7692 		return;
7693 
7694 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7695 	if (!disconnected_acrtc)
7696 		return;
7697 
7698 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7699 	if (!acrtc_state->stream)
7700 		return;
7701 
7702 	/*
7703 	 * If the previous sink is not released and different from the current,
7704 	 * we deduce that we are in a state where we cannot rely on a usermode
7705 	 * call to turn on the display, so we do it here.
7706 	 */
7707 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7708 		dm_force_atomic_commit(&aconnector->base);
7709 }
7710 
7711 /*
7712  * Grabs all modesetting locks to serialize against any blocking commits,
7713  * and waits for completion of all non-blocking commits.
7714  */
7715 static int do_aquire_global_lock(struct drm_device *dev,
7716 				 struct drm_atomic_state *state)
7717 {
7718 	struct drm_crtc *crtc;
7719 	struct drm_crtc_commit *commit;
7720 	long ret;
7721 
7722 	/*
7723 	 * Adding all modeset locks to acquire_ctx will
7724 	 * ensure that when the framework releases it, the
7725 	 * extra locks we are taking here will get released too.
7726 	 */
7727 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7728 	if (ret)
7729 		return ret;
7730 
7731 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7732 		spin_lock(&crtc->commit_lock);
7733 		commit = list_first_entry_or_null(&crtc->commit_list,
7734 				struct drm_crtc_commit, commit_entry);
7735 		if (commit)
7736 			drm_crtc_commit_get(commit);
7737 		spin_unlock(&crtc->commit_lock);
7738 
7739 		if (!commit)
7740 			continue;
7741 
7742 		/*
7743 		 * Make sure all pending HW programming has completed and
7744 		 * all page flips are done
7745 		 */
7746 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7747 
7748 		if (ret > 0)
7749 			ret = wait_for_completion_interruptible_timeout(
7750 					&commit->flip_done, 10*HZ);
7751 
7752 		if (ret == 0)
7753 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7754 				  "timed out\n", crtc->base.id, crtc->name);
7755 
7756 		drm_crtc_commit_put(commit);
7757 	}
7758 
7759 	return ret < 0 ? ret : 0;
7760 }
7761 
7762 static void get_freesync_config_for_crtc(
7763 	struct dm_crtc_state *new_crtc_state,
7764 	struct dm_connector_state *new_con_state)
7765 {
7766 	struct mod_freesync_config config = {0};
7767 	struct amdgpu_dm_connector *aconnector =
7768 			to_amdgpu_dm_connector(new_con_state->base.connector);
7769 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7770 	int vrefresh = drm_mode_vrefresh(mode);
7771 
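	/* VRR is only usable when the mode's refresh rate lies within the panel's FreeSync range. */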
7772 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7773 					vrefresh >= aconnector->min_vfreq &&
7774 					vrefresh <= aconnector->max_vfreq;
7775 
7776 	if (new_crtc_state->vrr_supported) {
7777 		new_crtc_state->stream->ignore_msa_timing_param = true;
7778 		config.state = new_crtc_state->base.vrr_enabled ?
7779 				VRR_STATE_ACTIVE_VARIABLE :
7780 				VRR_STATE_INACTIVE;
7781 		config.min_refresh_in_uhz =
7782 				aconnector->min_vfreq * 1000000;
7783 		config.max_refresh_in_uhz =
7784 				aconnector->max_vfreq * 1000000;
7785 		config.vsif_supported = true;
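		/* BTR (below the range): compensate when content drops below the minimum refresh rate. */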
7786 		config.btr = true;
7787 	}
7788 
7789 	new_crtc_state->freesync_config = config;
7790 }
7791 
7792 static void reset_freesync_config_for_crtc(
7793 	struct dm_crtc_state *new_crtc_state)
7794 {
7795 	new_crtc_state->vrr_supported = false;
7796 
7797 	memset(&new_crtc_state->vrr_params, 0,
7798 	       sizeof(new_crtc_state->vrr_params));
7799 	memset(&new_crtc_state->vrr_infopacket, 0,
7800 	       sizeof(new_crtc_state->vrr_infopacket));
7801 }
7802 
7803 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7804 				struct drm_atomic_state *state,
7805 				struct drm_crtc *crtc,
7806 				struct drm_crtc_state *old_crtc_state,
7807 				struct drm_crtc_state *new_crtc_state,
7808 				bool enable,
7809 				bool *lock_and_validation_needed)
7810 {
7811 	struct dm_atomic_state *dm_state = NULL;
7812 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7813 	struct dc_stream_state *new_stream;
7814 	int ret = 0;
7815 
7816 	/*
7817 	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
7818 	 * dc_validation_set, and update changed items there.
7819 	 */
7820 	struct amdgpu_crtc *acrtc = NULL;
7821 	struct amdgpu_dm_connector *aconnector = NULL;
7822 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7823 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7824 
7825 	new_stream = NULL;
7826 
7827 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7828 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7829 	acrtc = to_amdgpu_crtc(crtc);
7830 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7831 
7832 	/* TODO This hack should go away */
7833 	if (aconnector && enable) {
7834 		/* Make sure fake sink is created in plug-in scenario */
7835 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7836 							    &aconnector->base);
7837 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7838 							    &aconnector->base);
7839 
7840 		if (IS_ERR(drm_new_conn_state)) {
7841 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7842 			goto fail;
7843 		}
7844 
7845 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7846 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7847 
7848 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7849 			goto skip_modeset;
7850 
7851 		new_stream = create_validate_stream_for_sink(aconnector,
7852 							     &new_crtc_state->mode,
7853 							     dm_new_conn_state,
7854 							     dm_old_crtc_state->stream);
7855 
7856 		/*
7857 		 * We can have no stream on ACTION_SET if a display
7858 		 * was disconnected during S3; in this case it is not an
7859 		 * error, the OS will be updated after detection and
7860 		 * will do the right thing on the next atomic commit.
7861 		 */
7862 
7863 		if (!new_stream) {
7864 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7865 					__func__, acrtc->base.base.id);
7866 			ret = -ENOMEM;
7867 			goto fail;
7868 		}
7869 
7870 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7871 
7872 		ret = fill_hdr_info_packet(drm_new_conn_state,
7873 					   &new_stream->hdr_static_metadata);
7874 		if (ret)
7875 			goto fail;
7876 
7877 		/*
7878 		 * If we already removed the old stream from the context
7879 		 * (and set the new stream to NULL) then we can't reuse
7880 		 * the old stream even if the stream and scaling are unchanged.
7881 		 * We'll hit the BUG_ON and black screen.
7882 		 *
7883 		 * TODO: Refactor this function to allow this check to work
7884 		 * in all conditions.
7885 		 */
7886 		if (dm_new_crtc_state->stream &&
7887 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7888 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7889 			new_crtc_state->mode_changed = false;
7890 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7891 					 new_crtc_state->mode_changed);
7892 		}
7893 	}
7894 
7895 	/* mode_changed flag may get updated above, need to check again */
7896 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7897 		goto skip_modeset;
7898 
7899 	DRM_DEBUG_DRIVER(
7900 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7901 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7902 		"connectors_changed:%d\n",
7903 		acrtc->crtc_id,
7904 		new_crtc_state->enable,
7905 		new_crtc_state->active,
7906 		new_crtc_state->planes_changed,
7907 		new_crtc_state->mode_changed,
7908 		new_crtc_state->active_changed,
7909 		new_crtc_state->connectors_changed);
7910 
7911 	/* Remove stream for any changed/disabled CRTC */
7912 	if (!enable) {
7913 
7914 		if (!dm_old_crtc_state->stream)
7915 			goto skip_modeset;
7916 
7917 		ret = dm_atomic_get_state(state, &dm_state);
7918 		if (ret)
7919 			goto fail;
7920 
7921 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7922 				crtc->base.id);
7923 
7924 		/* i.e. reset mode */
7925 		if (dc_remove_stream_from_ctx(
7926 				dm->dc,
7927 				dm_state->context,
7928 				dm_old_crtc_state->stream) != DC_OK) {
7929 			ret = -EINVAL;
7930 			goto fail;
7931 		}
7932 
7933 		dc_stream_release(dm_old_crtc_state->stream);
7934 		dm_new_crtc_state->stream = NULL;
7935 
7936 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7937 
7938 		*lock_and_validation_needed = true;
7939 
7940 	} else {/* Add stream for any updated/enabled CRTC */
7941 		/*
7942 		 * Quick fix to prevent a NULL pointer dereference on new_stream
7943 		 * when added MST connectors are not found in the existing
7944 		 * crtc_state in chained mode. TODO: dig out the root cause.
7945 		 */
7946 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7947 			goto skip_modeset;
7948 
7949 		if (modereset_required(new_crtc_state))
7950 			goto skip_modeset;
7951 
7952 		if (modeset_required(new_crtc_state, new_stream,
7953 				     dm_old_crtc_state->stream)) {
7954 
7955 			WARN_ON(dm_new_crtc_state->stream);
7956 
7957 			ret = dm_atomic_get_state(state, &dm_state);
7958 			if (ret)
7959 				goto fail;
7960 
7961 			dm_new_crtc_state->stream = new_stream;
7962 
7963 			dc_stream_retain(new_stream);
7964 
7965 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7966 						crtc->base.id);
7967 
7968 			if (dc_add_stream_to_ctx(
7969 					dm->dc,
7970 					dm_state->context,
7971 					dm_new_crtc_state->stream) != DC_OK) {
7972 				ret = -EINVAL;
7973 				goto fail;
7974 			}
7975 
7976 			*lock_and_validation_needed = true;
7977 		}
7978 	}
7979 
7980 skip_modeset:
7981 	/* Release extra reference */
7982 	if (new_stream)
7983 		 dc_stream_release(new_stream);
7984 
7985 	/*
7986 	 * We want to do dc stream updates that do not require a
7987 	 * full modeset below.
7988 	 */
7989 	if (!(enable && aconnector && new_crtc_state->enable &&
7990 	      new_crtc_state->active))
7991 		return 0;
7992 	/*
7993 	 * Given above conditions, the dc state cannot be NULL because:
7994 	 * 1. We're in the process of enabling CRTCs (just been added
7995 	 *    to the dc context, or already is on the context)
7996 	 * 2. Has a valid connector attached, and
7997 	 * 3. Is currently active and enabled.
7998 	 * => The dc stream state currently exists.
7999 	 */
8000 	BUG_ON(dm_new_crtc_state->stream == NULL);
8001 
8002 	/* Scaling or underscan settings */
8003 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8004 		update_stream_scaling_settings(
8005 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8006 
8007 	/* ABM settings */
8008 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8009 
8010 	/*
8011 	 * Color management settings. We also update color properties
8012 	 * when a modeset is needed, to ensure it gets reprogrammed.
8013 	 */
8014 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8015 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8016 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8017 		if (ret)
8018 			goto fail;
8019 	}
8020 
8021 	/* Update Freesync settings. */
8022 	get_freesync_config_for_crtc(dm_new_crtc_state,
8023 				     dm_new_conn_state);
8024 
8025 	return ret;
8026 
8027 fail:
8028 	if (new_stream)
8029 		dc_stream_release(new_stream);
8030 	return ret;
8031 }
8032 
8033 static bool should_reset_plane(struct drm_atomic_state *state,
8034 			       struct drm_plane *plane,
8035 			       struct drm_plane_state *old_plane_state,
8036 			       struct drm_plane_state *new_plane_state)
8037 {
8038 	struct drm_plane *other;
8039 	struct drm_plane_state *old_other_state, *new_other_state;
8040 	struct drm_crtc_state *new_crtc_state;
8041 	int i;
8042 
8043 	/*
8044 	 * TODO: Remove this hack once the checks below are sufficient
8045 	 * to determine when we need to reset all the planes on
8046 	 * the stream.
8047 	 */
8048 	if (state->allow_modeset)
8049 		return true;
8050 
8051 	/* Exit early if we know that we're adding or removing the plane. */
8052 	if (old_plane_state->crtc != new_plane_state->crtc)
8053 		return true;
8054 
8055 	/* old crtc == new_crtc == NULL, plane not in context. */
8056 	if (!new_plane_state->crtc)
8057 		return false;
8058 
8059 	new_crtc_state =
8060 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8061 
8062 	if (!new_crtc_state)
8063 		return true;
8064 
8065 	/* CRTC Degamma changes currently require us to recreate planes. */
8066 	if (new_crtc_state->color_mgmt_changed)
8067 		return true;
8068 
8069 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8070 		return true;
8071 
8072 	/*
8073 	 * If there are any new primary or overlay planes being added or
8074 	 * removed then the z-order can potentially change. To ensure
8075 	 * correct z-order and pipe acquisition the current DC architecture
8076 	 * requires us to remove and recreate all existing planes.
8077 	 *
8078 	 * TODO: Come up with a more elegant solution for this.
8079 	 */
8080 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8081 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8082 			continue;
8083 
8084 		if (old_other_state->crtc != new_plane_state->crtc &&
8085 		    new_other_state->crtc != new_plane_state->crtc)
8086 			continue;
8087 
8088 		if (old_other_state->crtc != new_other_state->crtc)
8089 			return true;
8090 
8091 		/* TODO: Remove this once we can handle fast format changes. */
8092 		if (old_other_state->fb && new_other_state->fb &&
8093 		    old_other_state->fb->format != new_other_state->fb->format)
8094 			return true;
8095 	}
8096 
8097 	return false;
8098 }
8099 
8100 static int dm_update_plane_state(struct dc *dc,
8101 				 struct drm_atomic_state *state,
8102 				 struct drm_plane *plane,
8103 				 struct drm_plane_state *old_plane_state,
8104 				 struct drm_plane_state *new_plane_state,
8105 				 bool enable,
8106 				 bool *lock_and_validation_needed)
8107 {
8108 
8109 	struct dm_atomic_state *dm_state = NULL;
8110 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8111 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8112 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8113 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8114 	struct amdgpu_crtc *new_acrtc;
8115 	bool needs_reset;
8116 	int ret = 0;
8117 
8118 
8119 	new_plane_crtc = new_plane_state->crtc;
8120 	old_plane_crtc = old_plane_state->crtc;
8121 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8122 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8123 
8124 	/* TODO: Implement a better atomic check for the cursor plane */
8125 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8126 		if (!enable || !new_plane_crtc ||
8127 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8128 			return 0;
8129 
8130 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8131 
8132 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8133 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8134 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8135 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8136 			return -EINVAL;
8137 		}
8138 
8139 		return 0;
8140 	}
8141 
8142 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8143 					 new_plane_state);
8144 
8145 	/* Remove any changed/removed planes */
8146 	if (!enable) {
8147 		if (!needs_reset)
8148 			return 0;
8149 
8150 		if (!old_plane_crtc)
8151 			return 0;
8152 
8153 		old_crtc_state = drm_atomic_get_old_crtc_state(
8154 				state, old_plane_crtc);
8155 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8156 
8157 		if (!dm_old_crtc_state->stream)
8158 			return 0;
8159 
8160 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8161 				plane->base.id, old_plane_crtc->base.id);
8162 
8163 		ret = dm_atomic_get_state(state, &dm_state);
8164 		if (ret)
8165 			return ret;
8166 
8167 		if (!dc_remove_plane_from_context(
8168 				dc,
8169 				dm_old_crtc_state->stream,
8170 				dm_old_plane_state->dc_state,
8171 				dm_state->context)) {
8172 
8173 			ret = -EINVAL;
8174 			return ret;
8175 		}
8176 
8177 
8178 		dc_plane_state_release(dm_old_plane_state->dc_state);
8179 		dm_new_plane_state->dc_state = NULL;
8180 
8181 		*lock_and_validation_needed = true;
8182 
8183 	} else { /* Add new planes */
8184 		struct dc_plane_state *dc_new_plane_state;
8185 
8186 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8187 			return 0;
8188 
8189 		if (!new_plane_crtc)
8190 			return 0;
8191 
8192 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8193 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8194 
8195 		if (!dm_new_crtc_state->stream)
8196 			return 0;
8197 
8198 		if (!needs_reset)
8199 			return 0;
8200 
8201 		WARN_ON(dm_new_plane_state->dc_state);
8202 
8203 		dc_new_plane_state = dc_create_plane_state(dc);
8204 		if (!dc_new_plane_state)
8205 			return -ENOMEM;
8206 
8207 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8208 				plane->base.id, new_plane_crtc->base.id);
8209 
8210 		ret = fill_dc_plane_attributes(
8211 			new_plane_crtc->dev->dev_private,
8212 			dc_new_plane_state,
8213 			new_plane_state,
8214 			new_crtc_state);
8215 		if (ret) {
8216 			dc_plane_state_release(dc_new_plane_state);
8217 			return ret;
8218 		}
8219 
8220 		ret = dm_atomic_get_state(state, &dm_state);
8221 		if (ret) {
8222 			dc_plane_state_release(dc_new_plane_state);
8223 			return ret;
8224 		}
8225 
8226 		/*
8227 		 * Any atomic check errors that occur after this will
8228 		 * not need a release. The plane state will be attached
8229 		 * to the stream, and therefore part of the atomic
8230 		 * state. It'll be released when the atomic state is
8231 		 * cleaned.
8232 		 */
8233 		if (!dc_add_plane_to_context(
8234 				dc,
8235 				dm_new_crtc_state->stream,
8236 				dc_new_plane_state,
8237 				dm_state->context)) {
8238 
8239 			dc_plane_state_release(dc_new_plane_state);
8240 			return -EINVAL;
8241 		}
8242 
8243 		dm_new_plane_state->dc_state = dc_new_plane_state;
8244 
8245 		/* Tell DC to do a full surface update every time there
8246 		 * is a plane change. Inefficient, but works for now.
8247 		 */
8248 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8249 
8250 		*lock_and_validation_needed = true;
8251 	}
8252 
8253 
8254 	return ret;
8255 }
8256 
8257 static int
8258 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8259 				    struct drm_atomic_state *state,
8260 				    enum surface_update_type *out_type)
8261 {
8262 	struct dc *dc = dm->dc;
8263 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8264 	int i, j, num_plane, ret = 0;
8265 	struct drm_plane_state *old_plane_state, *new_plane_state;
8266 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8267 	struct drm_crtc *new_plane_crtc;
8268 	struct drm_plane *plane;
8269 
8270 	struct drm_crtc *crtc;
8271 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8272 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8273 	struct dc_stream_status *status = NULL;
8274 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8275 	struct surface_info_bundle {
8276 		struct dc_surface_update surface_updates[MAX_SURFACES];
8277 		struct dc_plane_info plane_infos[MAX_SURFACES];
8278 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8279 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8280 		struct dc_stream_update stream_update;
8281 	} *bundle;
8282 
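	/* The bundle is too large for the kernel stack, so allocate it on the heap. */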
8283 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8284 
8285 	if (!bundle) {
8286 		DRM_ERROR("Failed to allocate update bundle\n");
8287 		/* Set type to FULL to avoid crashing in DC */
8288 		update_type = UPDATE_TYPE_FULL;
8289 		goto cleanup;
8290 	}
8291 
8292 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8293 
8294 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8295 
8296 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8297 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8298 		num_plane = 0;
8299 
8300 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8301 			update_type = UPDATE_TYPE_FULL;
8302 			goto cleanup;
8303 		}
8304 
8305 		if (!new_dm_crtc_state->stream)
8306 			continue;
8307 
8308 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8309 			const struct amdgpu_framebuffer *amdgpu_fb =
8310 				to_amdgpu_framebuffer(new_plane_state->fb);
8311 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8312 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8313 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8314 			uint64_t tiling_flags;
8315 			bool tmz_surface = false;
8316 
8317 			new_plane_crtc = new_plane_state->crtc;
8318 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8319 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8320 
8321 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8322 				continue;
8323 
8324 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8325 				update_type = UPDATE_TYPE_FULL;
8326 				goto cleanup;
8327 			}
8328 
8329 			if (crtc != new_plane_crtc)
8330 				continue;
8331 
8332 			bundle->surface_updates[num_plane].surface =
8333 					new_dm_plane_state->dc_state;
8334 
8335 			if (new_crtc_state->mode_changed) {
8336 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8337 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8338 			}
8339 
8340 			if (new_crtc_state->color_mgmt_changed) {
8341 				bundle->surface_updates[num_plane].gamma =
8342 						new_dm_plane_state->dc_state->gamma_correction;
8343 				bundle->surface_updates[num_plane].in_transfer_func =
8344 						new_dm_plane_state->dc_state->in_transfer_func;
8345 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8346 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8347 				bundle->stream_update.gamut_remap =
8348 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8349 				bundle->stream_update.output_csc_transform =
8350 						&new_dm_crtc_state->stream->csc_color_matrix;
8351 				bundle->stream_update.out_transfer_func =
8352 						new_dm_crtc_state->stream->out_transfer_func;
8353 			}
8354 
8355 			ret = fill_dc_scaling_info(new_plane_state,
8356 						   scaling_info);
8357 			if (ret)
8358 				goto cleanup;
8359 
8360 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8361 
8362 			if (amdgpu_fb) {
8363 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8364 				if (ret)
8365 					goto cleanup;
8366 
8367 				ret = fill_dc_plane_info_and_addr(
8368 					dm->adev, new_plane_state, tiling_flags,
8369 					plane_info,
8370 					&flip_addr->address, tmz_surface,
8371 					false);
8372 				if (ret)
8373 					goto cleanup;
8374 
8375 				bundle->surface_updates[num_plane].plane_info = plane_info;
8376 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8377 			}
8378 
8379 			num_plane++;
8380 		}
8381 
8382 		if (num_plane == 0)
8383 			continue;
8384 
8385 		ret = dm_atomic_get_state(state, &dm_state);
8386 		if (ret)
8387 			goto cleanup;
8388 
8389 		old_dm_state = dm_atomic_get_old_state(state);
8390 		if (!old_dm_state) {
8391 			ret = -EINVAL;
8392 			goto cleanup;
8393 		}
8394 
8395 		status = dc_stream_get_status_from_state(old_dm_state->context,
8396 							 new_dm_crtc_state->stream);
8397 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8398 		/*
8399 		 * TODO: DC modifies the surface during this call so we need
8400 		 * to lock here - find a way to do this without locking.
8401 		 */
8402 		mutex_lock(&dm->dc_lock);
8403 		update_type = dc_check_update_surfaces_for_stream(
8404 				dc,	bundle->surface_updates, num_plane,
8405 				&bundle->stream_update, status);
8406 		mutex_unlock(&dm->dc_lock);
8407 
8408 		if (update_type > UPDATE_TYPE_MED) {
8409 			update_type = UPDATE_TYPE_FULL;
8410 			goto cleanup;
8411 		}
8412 	}
8413 
8414 cleanup:
8415 	kfree(bundle);
8416 
8417 	*out_type = update_type;
8418 	return ret;
8419 }
8420 
8421 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8422 {
8423 	struct drm_connector *connector;
8424 	struct drm_connector_state *conn_state;
8425 	struct amdgpu_dm_connector *aconnector = NULL;
8426 	int i;
8427 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8428 		if (conn_state->crtc != crtc)
8429 			continue;
8430 
8431 		aconnector = to_amdgpu_dm_connector(connector);
8432 		if (!aconnector->port || !aconnector->mst_port)
8433 			aconnector = NULL;
8434 		else
8435 			break;
8436 	}
8437 
8438 	if (!aconnector)
8439 		return 0;
8440 
8441 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8442 }
8443 
8444 /**
8445  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8446  * @dev: The DRM device
8447  * @state: The atomic state to commit
8448  *
8449  * Validate that the given atomic state is programmable by DC into hardware.
8450  * This involves constructing a &struct dc_state reflecting the new hardware
8451  * state we wish to commit, then querying DC to see if it is programmable. It's
8452  * important not to modify the existing DC state. Otherwise, atomic_check
8453  * may unexpectedly commit hardware changes.
8454  *
8455  * When validating the DC state, it's important that the right locks are
8456  * acquired. For full updates case which removes/adds/updates streams on one
8457  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8458  * that any such full update commit will wait for completion of any outstanding
8459  * flip using DRMs synchronization events. See
8460  * dm_determine_update_type_for_commit()
8461  *
8462  * Note that DM adds the affected connectors for all CRTCs in state, when that
8463  * might not seem necessary. This is because DC stream creation requires the
8464  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8465  * be possible but non-trivial - a possible TODO item.
8466  *
8467  * Return: -Error code if validation failed.
8468  */
8469 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8470 				  struct drm_atomic_state *state)
8471 {
8472 	struct amdgpu_device *adev = dev->dev_private;
8473 	struct dm_atomic_state *dm_state = NULL;
8474 	struct dc *dc = adev->dm.dc;
8475 	struct drm_connector *connector;
8476 	struct drm_connector_state *old_con_state, *new_con_state;
8477 	struct drm_crtc *crtc;
8478 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8479 	struct drm_plane *plane;
8480 	struct drm_plane_state *old_plane_state, *new_plane_state;
8481 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8482 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8483 
8484 	int ret, i;
8485 
8486 	/*
8487 	 * This bool will be set to true for any modeset/reset
8488 	 * or plane update which implies a non-fast surface update.
8489 	 */
8490 	bool lock_and_validation_needed = false;
8491 
8492 	ret = drm_atomic_helper_check_modeset(dev, state);
8493 	if (ret)
8494 		goto fail;
8495 
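	/*
	 * On DSC-capable ASICs, a modeset on one MST CRTC can change the DSC
	 * configuration of every stream sharing the same topology, so pull
	 * all affected MST CRTCs into the atomic state.
	 */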
8496 	if (adev->asic_type >= CHIP_NAVI10) {
8497 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8498 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8499 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8500 				if (ret)
8501 					goto fail;
8502 			}
8503 		}
8504 	}
8505 
8506 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8507 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8508 		    !new_crtc_state->color_mgmt_changed &&
8509 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8510 			continue;
8511 
8512 		if (!new_crtc_state->enable)
8513 			continue;
8514 
8515 		ret = drm_atomic_add_affected_connectors(state, crtc);
8516 		if (ret)
8517 			return ret;
8518 
8519 		ret = drm_atomic_add_affected_planes(state, crtc);
8520 		if (ret)
8521 			goto fail;
8522 	}
8523 
8524 	/*
8525 	 * Add all primary and overlay planes on the CRTC to the state
8526 	 * whenever a plane is enabled to maintain correct z-ordering
8527 	 * and to enable fast surface updates.
8528 	 */
8529 	drm_for_each_crtc(crtc, dev) {
8530 		bool modified = false;
8531 
8532 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8533 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8534 				continue;
8535 
8536 			if (new_plane_state->crtc == crtc ||
8537 			    old_plane_state->crtc == crtc) {
8538 				modified = true;
8539 				break;
8540 			}
8541 		}
8542 
8543 		if (!modified)
8544 			continue;
8545 
8546 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8547 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8548 				continue;
8549 
8550 			new_plane_state =
8551 				drm_atomic_get_plane_state(state, plane);
8552 
8553 			if (IS_ERR(new_plane_state)) {
8554 				ret = PTR_ERR(new_plane_state);
8555 				goto fail;
8556 			}
8557 		}
8558 	}
8559 
8560 	/* Remove existing planes if they are modified */
8561 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8562 		ret = dm_update_plane_state(dc, state, plane,
8563 					    old_plane_state,
8564 					    new_plane_state,
8565 					    false,
8566 					    &lock_and_validation_needed);
8567 		if (ret)
8568 			goto fail;
8569 	}
8570 
8571 	/* Disable all crtcs which require disable */
8572 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8573 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8574 					   old_crtc_state,
8575 					   new_crtc_state,
8576 					   false,
8577 					   &lock_and_validation_needed);
8578 		if (ret)
8579 			goto fail;
8580 	}
8581 
8582 	/* Enable all crtcs which require enable */
8583 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8584 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8585 					   old_crtc_state,
8586 					   new_crtc_state,
8587 					   true,
8588 					   &lock_and_validation_needed);
8589 		if (ret)
8590 			goto fail;
8591 	}
8592 
8593 	/* Add new/modified planes */
8594 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8595 		ret = dm_update_plane_state(dc, state, plane,
8596 					    old_plane_state,
8597 					    new_plane_state,
8598 					    true,
8599 					    &lock_and_validation_needed);
8600 		if (ret)
8601 			goto fail;
8602 	}
8603 
8604 	/* Run this here since we want to validate the streams we created */
8605 	ret = drm_atomic_helper_check_planes(dev, state);
8606 	if (ret)
8607 		goto fail;
8608 
8609 	if (state->legacy_cursor_update) {
8610 		/*
8611 		 * This is a fast cursor update coming from the plane update
8612 		 * helper, check if it can be done asynchronously for better
8613 		 * performance.
8614 		 */
8615 		state->async_update =
8616 			!drm_atomic_helper_async_check(dev, state);
8617 
8618 		/*
8619 		 * Skip the remaining global validation if this is an async
8620 		 * update. Cursor updates can be done without affecting
8621 		 * state or bandwidth calcs and this avoids the performance
8622 		 * penalty of locking the private state object and
8623 		 * allocating a new dc_state.
8624 		 */
8625 		if (state->async_update)
8626 			return 0;
8627 	}
8628 
8629 	/* Check scaling and underscan changes */
8630 	/* TODO: Removed scaling changes validation due to inability to commit
8631 	 * a new stream into the context without causing a full reset. Need to
8632 	 * decide how to handle this.
8633 	 */
8634 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8635 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8636 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8637 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8638 
8639 		/* Skip any modesets/resets */
8640 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8641 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8642 			continue;
8643 
8644 		/* Skip anything that is not a scaling or underscan change */
8645 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8646 			continue;
8647 
8648 		overall_update_type = UPDATE_TYPE_FULL;
8649 		lock_and_validation_needed = true;
8650 	}
8651 
8652 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8653 	if (ret)
8654 		goto fail;
8655 
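	/*
	 * Note: enum surface_update_type is ordered (UPDATE_TYPE_FAST <
	 * UPDATE_TYPE_MED < UPDATE_TYPE_FULL), so the comparison below is
	 * effectively overall_update_type = max(overall_update_type,
	 * update_type).
	 */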
8656 	if (overall_update_type < update_type)
8657 		overall_update_type = update_type;
8658 
8659 	/*
8660 	 * lock_and_validation_needed was an old way to determine whether we need
8661 	 * the global lock. Leaving it in to check if we broke any corner cases:
8662 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8663 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8664 	 */
8665 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8666 		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL\n");
8667 
8668 	if (overall_update_type > UPDATE_TYPE_FAST) {
8669 		ret = dm_atomic_get_state(state, &dm_state);
8670 		if (ret)
8671 			goto fail;
8672 
8673 		ret = do_aquire_global_lock(dev, state);
8674 		if (ret)
8675 			goto fail;
8676 
8677 #if defined(CONFIG_DRM_AMD_DC_DCN)
8678 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
8679 			ret = -EINVAL;
			goto fail;
		}
8680 
8681 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8682 		if (ret)
8683 			goto fail;
8684 #endif
8685 
8686 		/*
8687 		 * Perform validation of MST topology in the state:
8688 		 * We need to perform MST atomic check before calling
8689 		 * dc_validate_global_state(), or there is a chance
8690 		 * to get stuck in an infinite loop and hang eventually.
8691 		 */
8692 		ret = drm_dp_mst_atomic_check(state);
8693 		if (ret)
8694 			goto fail;
8695 
8696 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8697 			ret = -EINVAL;
8698 			goto fail;
8699 		}
8700 	} else {
8701 		/*
8702 		 * The commit is a fast update. Fast updates shouldn't change
8703 		 * the DC context, affect global validation, and can have their
8704 		 * commit work done in parallel with other commits not touching
8705 		 * the same resource. If we have a new DC context as part of
8706 		 * the DM atomic state from validation we need to free it and
8707 		 * retain the existing one instead.
8708 		 */
8709 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8710 
8711 		new_dm_state = dm_atomic_get_new_state(state);
8712 		old_dm_state = dm_atomic_get_old_state(state);
8713 
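		/*
		 * dc_retain_state()/dc_release_state() adjust the dc_state
		 * reference count, so after the pointer swap below both DM
		 * states share the original context with balanced references.
		 */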
8714 		if (new_dm_state && old_dm_state) {
8715 			if (new_dm_state->context)
8716 				dc_release_state(new_dm_state->context);
8717 
8718 			new_dm_state->context = old_dm_state->context;
8719 
8720 			if (old_dm_state->context)
8721 				dc_retain_state(old_dm_state->context);
8722 		}
8723 	}
8724 
8725 	/* Store the overall update type for use later in atomic check. */
8726 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8727 		struct dm_crtc_state *dm_new_crtc_state =
8728 			to_dm_crtc_state(new_crtc_state);
8729 
8730 		dm_new_crtc_state->update_type = (int)overall_update_type;
8731 	}
8732 
8733 	/* Must succeed (ret == 0) at this point */
8734 	WARN_ON(ret);
8735 	return ret;
8736 
8737 fail:
8738 	if (ret == -EDEADLK)
8739 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8740 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8741 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8742 	else
8743 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8744 
8745 	return ret;
8746 }
8747 
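/*
 * For reference, a minimal sketch (not a definitive wiring) of how an atomic
 * check hook like amdgpu_dm_atomic_check() is plugged into DRM via
 * &struct drm_mode_config_funcs; the table name below is illustrative:
 *
 *	static const struct drm_mode_config_funcs example_mode_funcs = {
 *		.fb_create = amdgpu_display_user_framebuffer_create,
 *		.atomic_check = amdgpu_dm_atomic_check,
 *		.atomic_commit = amdgpu_dm_atomic_commit,
 *	};
 *
 * The DRM core then calls .atomic_check from drm_atomic_check_only() for
 * every atomic ioctl before any of the state is committed to hardware.
 */
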
8748 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8749 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8750 {
8751 	uint8_t dpcd_data;
8752 	bool capable = false;
8753 
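	/*
	 * DP_DOWN_STREAM_PORT_COUNT is DPCD address 0x007; bit 6 in it,
	 * DP_MSA_TIMING_PAR_IGNORED, means the sink can render without the
	 * MSA timing parameters - a prerequisite for adaptive sync over DP.
	 */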
8754 	if (amdgpu_dm_connector->dc_link &&
8755 		dm_helpers_dp_read_dpcd(
8756 				NULL,
8757 				amdgpu_dm_connector->dc_link,
8758 				DP_DOWN_STREAM_PORT_COUNT,
8759 				&dpcd_data,
8760 				sizeof(dpcd_data))) {
8761 		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
8762 	}
8763 
8764 	return capable;
8765 }

8766 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8767 					struct edid *edid)
8768 {
8769 	int i;
8770 	bool edid_check_required;
8771 	struct detailed_timing *timing;
8772 	struct detailed_non_pixel *data;
8773 	struct detailed_data_monitor_range *range;
8774 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8775 			to_amdgpu_dm_connector(connector);
8776 	struct dm_connector_state *dm_con_state = NULL;
8777 
8778 	struct drm_device *dev = connector->dev;
8779 	struct amdgpu_device *adev = dev->dev_private;
8780 	bool freesync_capable = false;
8781 
8782 	if (!connector->state) {
8783 		DRM_ERROR("%s - Connector has no state\n", __func__);
8784 		goto update;
8785 	}
8786 
8787 	if (!edid) {
8788 		dm_con_state = to_dm_connector_state(connector->state);
8789 
8790 		amdgpu_dm_connector->min_vfreq = 0;
8791 		amdgpu_dm_connector->max_vfreq = 0;
8792 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8793 
8794 		goto update;
8795 	}
8796 
8797 	dm_con_state = to_dm_connector_state(connector->state);
8798 
8799 	edid_check_required = false;
8800 	if (!amdgpu_dm_connector->dc_sink) {
8801 		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
8802 		goto update;
8803 	}
8804 	if (!adev->dm.freesync_module)
8805 		goto update;
8806 	/*
8807 	 * If the EDID is non-NULL, restrict FreeSync only to DP and eDP.
8808 	 */
8809 	if (edid) {
8810 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8811 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8812 			edid_check_required = is_dp_capable_without_timing_msa(
8813 						adev->dm.dc,
8814 						amdgpu_dm_connector);
8815 		}
8816 	}
8817 	if (edid_check_required && (edid->version > 1 ||
8818 	   (edid->version == 1 && edid->revision > 1))) {
8819 		for (i = 0; i < 4; i++) {
8821 			timing	= &edid->detailed_timings[i];
8822 			data	= &timing->data.other_data;
8823 			range	= &data->data.range;
8824 			/*
8825 			 * Check if monitor has continuous frequency mode
8826 			 */
8827 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8828 				continue;
8829 			/*
8830 			 * Check for flag range limits only. If flag == 1 then
8831 			 * no additional timing information provided.
8832 			 * Default GTF, GTF Secondary curve and CVT are not
8833 			 * supported
8834 			 */
8835 			if (range->flags != 1)
8836 				continue;
8837 
8838 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8839 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8840 			amdgpu_dm_connector->pixel_clock_mhz =
8841 				range->pixel_clock_mhz * 10;
8842 			break;
8843 		}
8844 
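		/*
		 * Only advertise VRR when the EDID exposes a refresh range
		 * wider than 10 Hz.
		 */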
8845 		if (amdgpu_dm_connector->max_vfreq -
8846 		    amdgpu_dm_connector->min_vfreq > 10) {
8847 			freesync_capable = true;
8848 		}
8850 	}
8851 
8852 update:
8853 	if (dm_con_state)
8854 		dm_con_state->freesync_capable = freesync_capable;
8855 
8856 	if (connector->vrr_capable_property)
8857 		drm_connector_set_vrr_capable_property(connector,
8858 						       freesync_capable);
8859 }
8860 
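/*
 * For context, a hedged userspace-side sketch (libdrm, not part of this
 * driver) of how a compositor could read back the "vrr_capable" property
 * that amdgpu_dm_update_freesync_caps() publishes above; fd and
 * connector_id are assumed to be provided by the caller:
 *
 *	#include <string.h>
 *	#include <xf86drmMode.h>
 *
 *	static int connector_is_vrr_capable(int fd, uint32_t connector_id)
 *	{
 *		drmModeObjectProperties *props;
 *		int capable = 0;
 *		uint32_t i;
 *
 *		props = drmModeObjectGetProperties(fd, connector_id,
 *						   DRM_MODE_OBJECT_CONNECTOR);
 *		if (!props)
 *			return 0;
 *
 *		for (i = 0; i < props->count_props; i++) {
 *			drmModePropertyRes *prop =
 *				drmModeGetProperty(fd, props->props[i]);
 *
 *			if (!prop)
 *				continue;
 *			if (!strcmp(prop->name, "vrr_capable"))
 *				capable = !!props->prop_values[i];
 *			drmModeFreeProperty(prop);
 *		}
 *		drmModeFreeObjectProperties(props);
 *		return capable;
 *	}
 */
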
8861 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8862 {
8863 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8864 
8865 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8866 		return;
8867 	if (link->type == dc_connection_none)
8868 		return;
8869 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8870 					dpcd_data, sizeof(dpcd_data))) {
8871 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8872 
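		/*
		 * DP_PSR_SUPPORT (DPCD 0x070) reports the sink's PSR version:
		 * 0 = unsupported, 1 = PSR1, 2 = PSR2. This path treats any
		 * non-zero value as PSR version 1.
		 */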
8873 		if (dpcd_data[0] == 0) {
8874 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8875 			link->psr_settings.psr_feature_enabled = false;
8876 		} else {
8877 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
8878 			link->psr_settings.psr_feature_enabled = true;
8879 		}
8880 
8881 		DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
8882 	}
8883 }
8884 
8885 /*
8886  * amdgpu_dm_link_setup_psr() - configure PSR link
8887  * @stream: stream state
8888  *
8889  * Return: true on success
8890  */
8891 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8892 {
8893 	struct dc_link *link = NULL;
8894 	struct psr_config psr_config = {0};
8895 	struct psr_context psr_context = {0};
8896 	bool ret = false;
8897 
8898 	if (stream == NULL)
8899 		return false;
8900 
8901 	link = stream->link;
8902 
8903 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8904 
8905 	if (psr_config.psr_version > 0) {
8906 		psr_config.psr_exit_link_training_required = 0x1;
8907 		psr_config.psr_frame_capture_indication_req = 0;
8908 		psr_config.psr_rfb_setup_time = 0x37;
8909 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8910 		psr_config.allow_smu_optimizations = 0x0;
8911 
8912 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8914 	}
8915 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8916 
8917 	return ret;
8918 }
8919 
8920 /*
8921  * amdgpu_dm_psr_enable() - enable PSR firmware
8922  * @stream: stream state
8923  *
8924  * Return: true on success
8925  */
8926 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8927 {
8928 	struct dc_link *link = stream->link;
8929 	unsigned int vsync_rate_hz = 0;
8930 	struct dc_static_screen_params params = {0};
8931 	/*
8932 	 * Calculate the number of static frames before generating an interrupt
8933 	 * to enter PSR. Start with a failsafe of 2 static frames.
8934 	 */
8935 	unsigned int num_frames_static = 2;
8936 
8937 	DRM_DEBUG_DRIVER("Enabling psr...\n");
8938 
8939 	vsync_rate_hz = div64_u64(div64_u64((
8940 			stream->timing.pix_clk_100hz * 100),
8941 			stream->timing.v_total),
8942 			stream->timing.h_total);
8943 
8944 	/*
8945 	 * Round up: calculate the number of frames such that at least 30 ms
8946 	 * of time has passed.
8947 	 */
8948 	if (vsync_rate_hz != 0) {
8949 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

8950 		num_frames_static = (30000 / frame_time_microsec) + 1;
8951 	}
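	/*
	 * Worked example: at 60 Hz, frame_time_microsec = 16666, giving
	 * num_frames_static = 30000 / 16666 + 1 = 2 (~33 ms of static
	 * frames); at 144 Hz it is 30000 / 6944 + 1 = 5 (~35 ms).
	 */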
8952 
8953 	params.triggers.cursor_update = true;
8954 	params.triggers.overlay_update = true;
8955 	params.triggers.surface_update = true;
8956 	params.num_frames = num_frames_static;
8957 
8958 	dc_stream_set_static_screen_params(link->ctx->dc,
8959 					   &stream, 1,
8960 					   &params);
8961 
8962 	return dc_link_set_psr_allow_active(link, true, false);
8963 }
8964 
8965 /*
8966  * amdgpu_dm_psr_disable() - disable PSR firmware
8967  * @stream: stream state
8968  *
8969  * Return: true on success
8970  */
8971 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8972 {
8974 	DRM_DEBUG_DRIVER("Disabling psr...\n");
8975 
8976 	return dc_link_set_psr_allow_active(stream->link, false, true);
8977 }
8978