xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision db5b5c679e6cad2bb147337af6c378d278231b45)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 
55 #include "amd_shared.h"
56 #include "amdgpu_dm_irq.h"
57 #include "dm_helpers.h"
58 #include "amdgpu_dm_mst_types.h"
59 #if defined(CONFIG_DEBUG_FS)
60 #include "amdgpu_dm_debugfs.h"
61 #endif
62 #include "amdgpu_dm_psr.h"
63 
64 #include "ivsrcid/ivsrcid_vislands30.h"
65 
66 #include "i2caux_interface.h"
67 #include <linux/module.h>
68 #include <linux/moduleparam.h>
69 #include <linux/types.h>
70 #include <linux/pm_runtime.h>
71 #include <linux/pci.h>
72 #include <linux/firmware.h>
73 #include <linux/component.h>
74 
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
84 
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87 
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
92 
93 #include "soc15_common.h"
94 #endif
95 
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99 
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 
117 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125 
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128 
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
138 
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143 
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146 	switch (link->dpcd_caps.dongle_type) {
147 	case DISPLAY_DONGLE_NONE:
148 		return DRM_MODE_SUBCONNECTOR_Native;
149 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 		return DRM_MODE_SUBCONNECTOR_VGA;
151 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 		return DRM_MODE_SUBCONNECTOR_DVID;
154 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 		return DRM_MODE_SUBCONNECTOR_HDMIA;
157 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158 	default:
159 		return DRM_MODE_SUBCONNECTOR_Unknown;
160 	}
161 }
162 
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165 	struct dc_link *link = aconnector->dc_link;
166 	struct drm_connector *connector = &aconnector->base;
167 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168 
169 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170 		return;
171 
172 	if (aconnector->dc_sink)
173 		subconnector = get_subconnector_type(link);
174 
175 	drm_object_property_set_value(&connector->base,
176 			connector->dev->mode_config.dp_subconnector_property,
177 			subconnector);
178 }
179 
180 /*
181  * Initializes drm_device display-related structures based on the information
182  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
183  * drm_encoder and drm_mode_config.
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190 
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192 				struct drm_plane *plane,
193 				unsigned long possible_crtcs,
194 				const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 			       struct drm_plane *plane,
197 			       uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
200 				    uint32_t link_index,
201 				    struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 				  struct amdgpu_encoder *aencoder,
204 				  uint32_t link_index);
205 
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207 
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209 
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 				  struct drm_atomic_state *state);
212 
213 static void handle_cursor_update(struct drm_plane *plane,
214 				 struct drm_plane_state *old_plane_state);
215 
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218 
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
220 static void handle_hpd_rx_irq(void *param);
221 
222 static bool
223 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
224 				 struct drm_crtc_state *new_crtc_state);
225 /*
226  * dm_vblank_get_counter
227  *
228  * @brief
229  * Get counter for number of vertical blanks
230  *
231  * @param
232  * struct amdgpu_device *adev - [in] desired amdgpu device
233  * int crtc - [in] index of the CRTC to get the counter from
234  *
235  * @return
236  * Counter for vertical blanks
237  */
238 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
239 {
240 	if (crtc >= adev->mode_info.num_crtc)
241 		return 0;
242 	else {
243 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
244 
245 		if (acrtc->dm_irq_params.stream == NULL) {
246 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
247 				  crtc);
248 			return 0;
249 		}
250 
251 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
252 	}
253 }
254 
255 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
256 				  u32 *vbl, u32 *position)
257 {
258 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
259 
260 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
261 		return -EINVAL;
262 	else {
263 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
264 
265 		if (acrtc->dm_irq_params.stream == NULL) {
266 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
267 				  crtc);
268 			return 0;
269 		}
270 
271 		/*
272 		 * TODO rework base driver to use values directly.
273 		 * for now parse it back into reg-format
274 		 */
275 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
276 					 &v_blank_start,
277 					 &v_blank_end,
278 					 &h_position,
279 					 &v_position);
280 
281 		*position = v_position | (h_position << 16);
282 		*vbl = v_blank_start | (v_blank_end << 16);
283 	}
284 
285 	return 0;
286 }
287 
288 static bool dm_is_idle(void *handle)
289 {
290 	/* XXX todo */
291 	return true;
292 }
293 
294 static int dm_wait_for_idle(void *handle)
295 {
296 	/* XXX todo */
297 	return 0;
298 }
299 
300 static bool dm_check_soft_reset(void *handle)
301 {
302 	return false;
303 }
304 
305 static int dm_soft_reset(void *handle)
306 {
307 	/* XXX todo */
308 	return 0;
309 }
310 
311 static struct amdgpu_crtc *
312 get_crtc_by_otg_inst(struct amdgpu_device *adev,
313 		     int otg_inst)
314 {
315 	struct drm_device *dev = adev_to_drm(adev);
316 	struct drm_crtc *crtc;
317 	struct amdgpu_crtc *amdgpu_crtc;
318 
319 	if (WARN_ON(otg_inst == -1))
320 		return adev->mode_info.crtcs[0];
321 
322 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
323 		amdgpu_crtc = to_amdgpu_crtc(crtc);
324 
325 		if (amdgpu_crtc->otg_inst == otg_inst)
326 			return amdgpu_crtc;
327 	}
328 
329 	return NULL;
330 }
331 
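/*
 * VRR state as seen from interrupt context: dm_irq_params holds the per-CRTC
 * copy of the freesync configuration that the high-IRQ handlers consume, as
 * opposed to amdgpu_dm_vrr_active() below, which reads the atomic CRTC state.
 */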
332 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
333 {
334 	return acrtc->dm_irq_params.freesync_config.state ==
335 		       VRR_STATE_ACTIVE_VARIABLE ||
336 	       acrtc->dm_irq_params.freesync_config.state ==
337 		       VRR_STATE_ACTIVE_FIXED;
338 }
339 
340 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
341 {
342 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
343 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
344 }
345 
346 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
347 					      struct dm_crtc_state *new_state)
348 {
349 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
350 		return true;
351 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
352 		return true;
353 	else
354 		return false;
355 }
356 
357 /**
358  * dm_pflip_high_irq() - Handle pageflip interrupt
359  * @interrupt_params: common IRQ parameters; carries the device and pflip IRQ source
360  *
361  * Handles the pageflip interrupt by notifying all interested parties
362  * that the pageflip has been completed.
363  */
364 static void dm_pflip_high_irq(void *interrupt_params)
365 {
366 	struct amdgpu_crtc *amdgpu_crtc;
367 	struct common_irq_params *irq_params = interrupt_params;
368 	struct amdgpu_device *adev = irq_params->adev;
369 	unsigned long flags;
370 	struct drm_pending_vblank_event *e;
371 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
372 	bool vrr_active;
373 
374 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
375 
376 	/* IRQ could occur when in initial stage */
377 	/* TODO work and BO cleanup */
378 	if (amdgpu_crtc == NULL) {
379 		DC_LOG_PFLIP("CRTC is null, returning.\n");
380 		return;
381 	}
382 
383 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
384 
385 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
386 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
387 						 amdgpu_crtc->pflip_status,
388 						 AMDGPU_FLIP_SUBMITTED,
389 						 amdgpu_crtc->crtc_id,
390 						 amdgpu_crtc);
391 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
392 		return;
393 	}
394 
395 	/* page flip completed. */
396 	e = amdgpu_crtc->event;
397 	amdgpu_crtc->event = NULL;
398 
399 	WARN_ON(!e);
400 
401 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
402 
403 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
404 	if (!vrr_active ||
405 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
406 				      &v_blank_end, &hpos, &vpos) ||
407 	    (vpos < v_blank_start)) {
408 		/* Update to correct count and vblank timestamp if racing with
409 		 * vblank irq. This also updates to the correct vblank timestamp
410 		 * even in VRR mode, as scanout is past the front-porch atm.
411 		 */
412 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
413 
414 		/* Wake up userspace by sending the pageflip event with proper
415 		 * count and timestamp of vblank of flip completion.
416 		 */
417 		if (e) {
418 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
419 
420 			/* Event sent, so done with vblank for this flip */
421 			drm_crtc_vblank_put(&amdgpu_crtc->base);
422 		}
423 	} else if (e) {
424 		/* VRR active and inside front-porch: vblank count and
425 		 * timestamp for pageflip event will only be up to date after
426 		 * drm_crtc_handle_vblank() has been executed from late vblank
427 		 * irq handler after start of back-porch (vline 0). We queue the
428 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
429 		 * updated timestamp and count, once it runs after us.
430 		 *
431 		 * We need to open-code this instead of using the helper
432 		 * drm_crtc_arm_vblank_event(), as that helper would
433 		 * call drm_crtc_accurate_vblank_count(), which we must
434 		 * not call in VRR mode while we are in front-porch!
435 		 */
436 
437 		/* sequence will be replaced by real count during send-out. */
438 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
439 		e->pipe = amdgpu_crtc->crtc_id;
440 
441 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
442 		e = NULL;
443 	}
444 
445 	/* Keep track of vblank of this flip for flip throttling. We use the
446 	 * cooked hw counter, as that one gets incremented at the start of this vblank
447 	 * of pageflip completion, so last_flip_vblank is the forbidden count
448 	 * for queueing new pageflips if vsync + VRR is enabled.
449 	 */
450 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
451 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
452 
453 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
454 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
455 
456 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
457 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
458 		     vrr_active, (int) !e);
459 }
460 
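/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs the core
 * vblank handling deferred from dm_crtc_high_irq(), plus BTR processing for
 * pre-DCE12 ASICs.
 */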
461 static void dm_vupdate_high_irq(void *interrupt_params)
462 {
463 	struct common_irq_params *irq_params = interrupt_params;
464 	struct amdgpu_device *adev = irq_params->adev;
465 	struct amdgpu_crtc *acrtc;
466 	struct drm_device *drm_dev;
467 	struct drm_vblank_crtc *vblank;
468 	ktime_t frame_duration_ns, previous_timestamp;
469 	unsigned long flags;
470 	int vrr_active;
471 
472 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
473 
474 	if (acrtc) {
475 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
476 		drm_dev = acrtc->base.dev;
477 		vblank = &drm_dev->vblank[acrtc->base.index];
478 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
479 		frame_duration_ns = vblank->time - previous_timestamp;
480 
481 		if (frame_duration_ns > 0) {
482 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
483 						frame_duration_ns,
484 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
485 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
486 		}
487 
488 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
489 			      acrtc->crtc_id,
490 			      vrr_active);
491 
492 		/* Core vblank handling is done here after end of front-porch in
493 		 * vrr mode, as vblank timestamping only gives valid results
494 		 * once scanout is past the front-porch. This will also deliver
495 		 * page-flip completion events that have been queued to us
496 		 * if a pageflip happened inside front-porch.
497 		 */
498 		if (vrr_active) {
499 			drm_crtc_handle_vblank(&acrtc->base);
500 
501 			/* BTR processing for pre-DCE12 ASICs */
502 			if (acrtc->dm_irq_params.stream &&
503 			    adev->family < AMDGPU_FAMILY_AI) {
504 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
505 				mod_freesync_handle_v_update(
506 				    adev->dm.freesync_module,
507 				    acrtc->dm_irq_params.stream,
508 				    &acrtc->dm_irq_params.vrr_params);
509 
510 				dc_stream_adjust_vmin_vmax(
511 				    adev->dm.dc,
512 				    acrtc->dm_irq_params.stream,
513 				    &acrtc->dm_irq_params.vrr_params.adjust);
514 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
515 			}
516 		}
517 	}
518 }
519 
520 /**
521  * dm_crtc_high_irq() - Handles CRTC interrupt
522  * @interrupt_params: used for determining the CRTC instance
523  *
524  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
525  * event handler.
526  */
527 static void dm_crtc_high_irq(void *interrupt_params)
528 {
529 	struct common_irq_params *irq_params = interrupt_params;
530 	struct amdgpu_device *adev = irq_params->adev;
531 	struct amdgpu_crtc *acrtc;
532 	unsigned long flags;
533 	int vrr_active;
534 
535 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
536 	if (!acrtc)
537 		return;
538 
539 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
540 
541 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
542 		      vrr_active, acrtc->dm_irq_params.active_planes);
543 
544 	/*
545 	 * Core vblank handling at start of front-porch is only possible
546 	 * in non-vrr mode, as only then does vblank timestamping give
547 	 * valid results while still inside the front-porch. Otherwise defer
548 	 * it to dm_vupdate_high_irq after the end of front-porch.
549 	 */
550 	if (!vrr_active)
551 		drm_crtc_handle_vblank(&acrtc->base);
552 
553 	/*
554 	 * The following must happen at the start of vblank, for CRC
555 	 * computation and below-the-range (BTR) support in vrr mode.
556 	 */
557 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
558 
559 	/* BTR updates need to happen before VUPDATE on Vega and above. */
560 	if (adev->family < AMDGPU_FAMILY_AI)
561 		return;
562 
563 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
564 
565 	if (acrtc->dm_irq_params.stream &&
566 	    acrtc->dm_irq_params.vrr_params.supported &&
567 	    acrtc->dm_irq_params.freesync_config.state ==
568 		    VRR_STATE_ACTIVE_VARIABLE) {
569 		mod_freesync_handle_v_update(adev->dm.freesync_module,
570 					     acrtc->dm_irq_params.stream,
571 					     &acrtc->dm_irq_params.vrr_params);
572 
573 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
574 					   &acrtc->dm_irq_params.vrr_params.adjust);
575 	}
576 
577 	/*
578 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
579 	 * In that case, pageflip completion interrupts won't fire and pageflip
580 	 * completion events won't get delivered. Prevent this by sending
581 	 * pending pageflip events from here if a flip is still pending.
582 	 *
583 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
584 	 * avoid race conditions between flip programming and completion,
585 	 * which could cause too early flip completion events.
586 	 */
587 	if (adev->family >= AMDGPU_FAMILY_RV &&
588 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
589 	    acrtc->dm_irq_params.active_planes == 0) {
590 		if (acrtc->event) {
591 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
592 			acrtc->event = NULL;
593 			drm_crtc_vblank_put(&acrtc->base);
594 		}
595 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
596 	}
597 
598 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
599 }
600 
601 #if defined(CONFIG_DRM_AMD_DC_DCN)
602 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
603 /**
604  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
605  * DCN generation ASICs
606  * @interrupt_params: interrupt parameters
607  *
608  * Used to set crc window/read out crc value at vertical line 0 position
609  */
610 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
611 {
612 	struct common_irq_params *irq_params = interrupt_params;
613 	struct amdgpu_device *adev = irq_params->adev;
614 	struct amdgpu_crtc *acrtc;
615 
616 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
617 
618 	if (!acrtc)
619 		return;
620 
621 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
622 }
623 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
624 
625 /**
626  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
627  * @adev: amdgpu_device pointer
628  * @notify: dmub notification structure
629  *
630  * DMUB AUX or SET_CONFIG command completion processing callback.
631  * Copies the dmub notification to DM, to be read by the AUX-command-issuing
632  * thread, and signals the event to wake up that thread.
633  */
634 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
635 {
636 	if (adev->dm.dmub_notify)
637 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
638 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
639 		complete(&adev->dm.dmub_aux_transfer_done);
640 }
641 
642 /**
643  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
644  * @adev: amdgpu_device pointer
645  * @notify: dmub notification structure
646  *
647  * DMUB HPD interrupt processing callback. Gets the display index through
648  * the link index and calls a helper to do the processing.
649  */
650 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
651 {
652 	struct amdgpu_dm_connector *aconnector;
653 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
654 	struct drm_connector *connector;
655 	struct drm_connector_list_iter iter;
656 	struct dc_link *link;
657 	uint8_t link_index = 0;
658 	struct drm_device *dev = adev->dm.ddev;
659 
660 	if (adev == NULL)
661 		return;
662 
663 	if (notify == NULL) {
664 		DRM_ERROR("DMUB HPD callback notification was NULL");
665 		return;
666 	}
667 
668 	if (notify->link_index >= adev->dm.dc->link_count) {
669 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
670 		return;
671 	}
672 
673 	link_index = notify->link_index;
674 	link = adev->dm.dc->links[link_index];
675 
676 	drm_connector_list_iter_begin(dev, &iter);
677 	drm_for_each_connector_iter(connector, &iter) {
678 		aconnector = to_amdgpu_dm_connector(connector);
679 		if (link && aconnector->dc_link == link) {
680 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
681 			hpd_aconnector = aconnector;
682 			break;
683 		}
684 	}
685 	drm_connector_list_iter_end(&iter);
686 
687 	if (hpd_aconnector) {
688 		if (notify->type == DMUB_NOTIFICATION_HPD)
689 			handle_hpd_irq_helper(hpd_aconnector);
690 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
691 			handle_hpd_rx_irq(hpd_aconnector);
692 	}
693 }
694 
695 /**
696  * register_dmub_notify_callback - Sets callback for DMUB notify
697  * @adev: amdgpu_device pointer
698  * @type: Type of dmub notification
699  * @callback: Dmub interrupt callback function
700  * @dmub_int_thread_offload: offload indicator
701  *
702  * API to register a dmub callback handler for a dmub notification.
703  * Also sets an indicator whether callback processing is to be offloaded
704  * to the dmub interrupt handling thread.
705  * Return: true if registered, false on a NULL callback or invalid type.
706  */
707 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
708 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
709 {
710 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
711 		adev->dm.dmub_callback[type] = callback;
712 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
713 	} else
714 		return false;
715 
716 	return true;
717 }
718 
719 static void dm_handle_hpd_work(struct work_struct *work)
720 {
721 	struct dmub_hpd_work *dmub_hpd_wrk;
722 
723 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
724 
725 	if (!dmub_hpd_wrk->dmub_notify) {
726 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
727 		return;
728 	}
729 
730 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
731 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
732 		dmub_hpd_wrk->dmub_notify);
733 	}
734 
735 	kfree(dmub_hpd_wrk->dmub_notify);
736 	kfree(dmub_hpd_wrk);
737 
738 }
739 
740 #define DMUB_TRACE_MAX_READ 64
741 /**
742  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
743  * @interrupt_params: used for determining the Outbox instance
744  *
745  * Handles the Outbox interrupt by reading pending DMUB notifications and
746  * trace buffer entries and dispatching them to the registered handlers.
747  */
748 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
749 {
750 	struct dmub_notification notify;
751 	struct common_irq_params *irq_params = interrupt_params;
752 	struct amdgpu_device *adev = irq_params->adev;
753 	struct amdgpu_display_manager *dm = &adev->dm;
754 	struct dmcub_trace_buf_entry entry = { 0 };
755 	uint32_t count = 0;
756 	struct dmub_hpd_work *dmub_hpd_wrk;
757 	struct dc_link *plink = NULL;
758 
759 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
760 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
761 
762 		do {
763 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
764 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
765 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
766 				continue;
767 			}
768 			if (!dm->dmub_callback[notify.type]) {
769 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
770 				continue;
771 			}
772 			if (dm->dmub_thread_offload[notify.type] == true) {
773 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
774 				if (!dmub_hpd_wrk) {
775 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
776 					return;
777 				}
778 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
779 				if (!dmub_hpd_wrk->dmub_notify) {
780 					kfree(dmub_hpd_wrk);
781 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
782 					return;
783 				}
784 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
785 				if (dmub_hpd_wrk->dmub_notify)
786 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
787 				dmub_hpd_wrk->adev = adev;
788 				if (notify.type == DMUB_NOTIFICATION_HPD) {
789 					plink = adev->dm.dc->links[notify.link_index];
790 					if (plink) {
791 						plink->hpd_status =
792 							notify.hpd_status == DP_HPD_PLUG;
793 					}
794 				}
795 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
796 			} else {
797 				dm->dmub_callback[notify.type](adev, &notify);
798 			}
799 		} while (notify.pending_notification);
800 	}
801 
802 
803 	do {
804 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
805 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
806 							entry.param0, entry.param1);
807 
808 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
809 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
810 		} else
811 			break;
812 
813 		count++;
814 
815 	} while (count <= DMUB_TRACE_MAX_READ);
816 
817 	if (count > DMUB_TRACE_MAX_READ)
818 		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
819 }
820 #endif /* CONFIG_DRM_AMD_DC_DCN */
821 
822 static int dm_set_clockgating_state(void *handle,
823 		  enum amd_clockgating_state state)
824 {
825 	return 0;
826 }
827 
828 static int dm_set_powergating_state(void *handle,
829 		  enum amd_powergating_state state)
830 {
831 	return 0;
832 }
833 
834 /* Prototypes of private functions */
835 static int dm_early_init(void *handle);
836 
837 /* Allocate memory for FBC compressed data  */
838 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
839 {
840 	struct drm_device *dev = connector->dev;
841 	struct amdgpu_device *adev = drm_to_adev(dev);
842 	struct dm_compressor_info *compressor = &adev->dm.compressor;
843 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
844 	struct drm_display_mode *mode;
845 	unsigned long max_size = 0;
846 
847 	if (adev->dm.dc->fbc_compressor == NULL)
848 		return;
849 
850 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
851 		return;
852 
853 	if (compressor->bo_ptr)
854 		return;
855 
856 
857 	list_for_each_entry(mode, &connector->modes, head) {
858 		if (max_size < mode->htotal * mode->vtotal)
859 			max_size = mode->htotal * mode->vtotal;
860 	}
861 
862 	if (max_size) {
863 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
864 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
865 			    &compressor->gpu_addr, &compressor->cpu_addr);
866 
867 		if (r)
868 			DRM_ERROR("DM: Failed to initialize FBC\n");
869 		else {
870 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
871 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
872 		}
873 
874 	}
875 
876 }
877 
878 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
879 					  int pipe, bool *enabled,
880 					  unsigned char *buf, int max_bytes)
881 {
882 	struct drm_device *dev = dev_get_drvdata(kdev);
883 	struct amdgpu_device *adev = drm_to_adev(dev);
884 	struct drm_connector *connector;
885 	struct drm_connector_list_iter conn_iter;
886 	struct amdgpu_dm_connector *aconnector;
887 	int ret = 0;
888 
889 	*enabled = false;
890 
891 	mutex_lock(&adev->dm.audio_lock);
892 
893 	drm_connector_list_iter_begin(dev, &conn_iter);
894 	drm_for_each_connector_iter(connector, &conn_iter) {
895 		aconnector = to_amdgpu_dm_connector(connector);
896 		if (aconnector->audio_inst != port)
897 			continue;
898 
899 		*enabled = true;
900 		ret = drm_eld_size(connector->eld);
901 		memcpy(buf, connector->eld, min(max_bytes, ret));
902 
903 		break;
904 	}
905 	drm_connector_list_iter_end(&conn_iter);
906 
907 	mutex_unlock(&adev->dm.audio_lock);
908 
909 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
910 
911 	return ret;
912 }
913 
914 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
915 	.get_eld = amdgpu_dm_audio_component_get_eld,
916 };
917 
918 static int amdgpu_dm_audio_component_bind(struct device *kdev,
919 				       struct device *hda_kdev, void *data)
920 {
921 	struct drm_device *dev = dev_get_drvdata(kdev);
922 	struct amdgpu_device *adev = drm_to_adev(dev);
923 	struct drm_audio_component *acomp = data;
924 
925 	acomp->ops = &amdgpu_dm_audio_component_ops;
926 	acomp->dev = kdev;
927 	adev->dm.audio_component = acomp;
928 
929 	return 0;
930 }
931 
932 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
933 					  struct device *hda_kdev, void *data)
934 {
935 	struct drm_device *dev = dev_get_drvdata(kdev);
936 	struct amdgpu_device *adev = drm_to_adev(dev);
937 	struct drm_audio_component *acomp = data;
938 
939 	acomp->ops = NULL;
940 	acomp->dev = NULL;
941 	adev->dm.audio_component = NULL;
942 }
943 
944 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
945 	.bind	= amdgpu_dm_audio_component_bind,
946 	.unbind	= amdgpu_dm_audio_component_unbind,
947 };
948 
949 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
950 {
951 	int i, ret;
952 
953 	if (!amdgpu_audio)
954 		return 0;
955 
956 	adev->mode_info.audio.enabled = true;
957 
958 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
959 
960 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
961 		adev->mode_info.audio.pin[i].channels = -1;
962 		adev->mode_info.audio.pin[i].rate = -1;
963 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
964 		adev->mode_info.audio.pin[i].status_bits = 0;
965 		adev->mode_info.audio.pin[i].category_code = 0;
966 		adev->mode_info.audio.pin[i].connected = false;
967 		adev->mode_info.audio.pin[i].id =
968 			adev->dm.dc->res_pool->audios[i]->inst;
969 		adev->mode_info.audio.pin[i].offset = 0;
970 	}
971 
972 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
973 	if (ret < 0)
974 		return ret;
975 
976 	adev->dm.audio_registered = true;
977 
978 	return 0;
979 }
980 
981 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
982 {
983 	if (!amdgpu_audio)
984 		return;
985 
986 	if (!adev->mode_info.audio.enabled)
987 		return;
988 
989 	if (adev->dm.audio_registered) {
990 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
991 		adev->dm.audio_registered = false;
992 	}
993 
994 	/* TODO: Disable audio? */
995 
996 	adev->mode_info.audio.enabled = false;
997 }
998 
999 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1000 {
1001 	struct drm_audio_component *acomp = adev->dm.audio_component;
1002 
1003 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1004 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1005 
1006 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1007 						 pin, -1);
1008 	}
1009 }
1010 
1011 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1012 {
1013 	const struct dmcub_firmware_header_v1_0 *hdr;
1014 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1015 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1016 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1017 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1018 	struct abm *abm = adev->dm.dc->res_pool->abm;
1019 	struct dmub_srv_hw_params hw_params;
1020 	enum dmub_status status;
1021 	const unsigned char *fw_inst_const, *fw_bss_data;
1022 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1023 	bool has_hw_support;
1024 	struct dc *dc = adev->dm.dc;
1025 
1026 	if (!dmub_srv)
1027 		/* DMUB isn't supported on the ASIC. */
1028 		return 0;
1029 
1030 	if (!fb_info) {
1031 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1032 		return -EINVAL;
1033 	}
1034 
1035 	if (!dmub_fw) {
1036 		/* Firmware required for DMUB support. */
1037 		DRM_ERROR("No firmware provided for DMUB.\n");
1038 		return -EINVAL;
1039 	}
1040 
1041 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1042 	if (status != DMUB_STATUS_OK) {
1043 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1044 		return -EINVAL;
1045 	}
1046 
1047 	if (!has_hw_support) {
1048 		DRM_INFO("DMUB unsupported on ASIC\n");
1049 		return 0;
1050 	}
1051 
1052 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1053 
1054 	fw_inst_const = dmub_fw->data +
1055 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1056 			PSP_HEADER_BYTES;
1057 
1058 	fw_bss_data = dmub_fw->data +
1059 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1060 		      le32_to_cpu(hdr->inst_const_bytes);
1061 
1062 	/* Copy firmware and bios info into FB memory. */
1063 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1064 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1065 
1066 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1067 
1068 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1069 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1070 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1071 	 * will be done by dm_dmub_hw_init
1072 	 */
1073 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1074 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1075 				fw_inst_const_size);
1076 	}
1077 
1078 	if (fw_bss_data_size)
1079 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1080 		       fw_bss_data, fw_bss_data_size);
1081 
1082 	/* Copy firmware bios info into FB memory. */
1083 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1084 	       adev->bios_size);
1085 
1086 	/* Reset regions that need to be reset. */
1087 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1088 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1089 
1090 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1091 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1092 
1093 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1094 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1095 
1096 	/* Initialize hardware. */
1097 	memset(&hw_params, 0, sizeof(hw_params));
1098 	hw_params.fb_base = adev->gmc.fb_start;
1099 	hw_params.fb_offset = adev->gmc.aper_base;
1100 
1101 	/* backdoor load firmware and trigger dmub running */
1102 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1103 		hw_params.load_inst_const = true;
1104 
1105 	if (dmcu)
1106 		hw_params.psp_version = dmcu->psp_version;
1107 
1108 	for (i = 0; i < fb_info->num_fb; ++i)
1109 		hw_params.fb[i] = &fb_info->fb[i];
1110 
1111 	switch (adev->asic_type) {
1112 	case CHIP_YELLOW_CARP:
1113 		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1114 			hw_params.dpia_supported = true;
1115 #if defined(CONFIG_DRM_AMD_DC_DCN)
1116 			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1117 #endif
1118 		}
1119 		break;
1120 	default:
1121 		break;
1122 	}
1123 
1124 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1125 	if (status != DMUB_STATUS_OK) {
1126 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1127 		return -EINVAL;
1128 	}
1129 
1130 	/* Wait for firmware load to finish. */
1131 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1132 	if (status != DMUB_STATUS_OK)
1133 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1134 
1135 	/* Init DMCU and ABM if available. */
1136 	if (dmcu && abm) {
1137 		dmcu->funcs->dmcu_init(dmcu);
1138 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1139 	}
1140 
1141 	if (!adev->dm.dc->ctx->dmub_srv)
1142 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1143 	if (!adev->dm.dc->ctx->dmub_srv) {
1144 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1145 		return -ENOMEM;
1146 	}
1147 
1148 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1149 		 adev->dm.dmcub_fw_version);
1150 
1151 	return 0;
1152 }
1153 
1154 #if defined(CONFIG_DRM_AMD_DC_DCN)
1155 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1156 {
1157 	uint64_t pt_base;
1158 	uint32_t logical_addr_low;
1159 	uint32_t logical_addr_high;
1160 	uint32_t agp_base, agp_bot, agp_top;
1161 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1162 
1163 	memset(pa_config, 0, sizeof(*pa_config));
1164 
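	/*
	 * The intermediate values below are kept in coarse units: system
	 * aperture bounds in 256KB units (>> 18), the AGP window in 16MB
	 * units (>> 24) and GART page table addresses in 4KB pages (>> 12).
	 * They are converted back to byte addresses when pa_config is
	 * filled in further down.
	 */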
1165 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1166 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1167 
1168 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1169 		/*
1170 		 * Raven2 has a HW issue that makes it unable to use vram that lies
1171 		 * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
1172 		 * increase the system aperture high address (add 1) to get rid
1173 		 * of the VM fault and hardware hang.
1174 		 */
1175 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1176 	else
1177 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1178 
1179 	agp_base = 0;
1180 	agp_bot = adev->gmc.agp_start >> 24;
1181 	agp_top = adev->gmc.agp_end >> 24;
1182 
1183 
1184 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1185 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1186 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1187 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1188 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1189 	page_table_base.low_part = lower_32_bits(pt_base);
1190 
1191 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1192 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1193 
1194 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1195 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1196 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1197 
1198 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1199 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1200 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1201 
1202 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1203 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1204 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1205 
1206 	pa_config->is_hvm_enabled = 0;
1207 
1208 }
1209 #endif
1210 #if defined(CONFIG_DRM_AMD_DC_DCN)
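/*
 * vblank_control_worker() - Deferred vblank enable/disable handling.
 *
 * Keeps a count of CRTCs with vblank interrupts enabled, lets DC apply idle
 * (MALL) optimizations only while that count is zero, and enables/disables
 * PSR on the stream according to the OS vblank requirements.
 */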
1211 static void vblank_control_worker(struct work_struct *work)
1212 {
1213 	struct vblank_control_work *vblank_work =
1214 		container_of(work, struct vblank_control_work, work);
1215 	struct amdgpu_display_manager *dm = vblank_work->dm;
1216 
1217 	mutex_lock(&dm->dc_lock);
1218 
1219 	if (vblank_work->enable)
1220 		dm->active_vblank_irq_count++;
1221 	else if (dm->active_vblank_irq_count)
1222 		dm->active_vblank_irq_count--;
1223 
1224 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1225 
1226 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1227 
1228 	/* Control PSR based on vblank requirements from OS */
1229 	if (vblank_work->stream && vblank_work->stream->link) {
1230 		if (vblank_work->enable) {
1231 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1232 				amdgpu_dm_psr_disable(vblank_work->stream);
1233 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1234 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1235 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1236 			amdgpu_dm_psr_enable(vblank_work->stream);
1237 		}
1238 	}
1239 
1240 	mutex_unlock(&dm->dc_lock);
1241 
1242 	dc_stream_release(vblank_work->stream);
1243 
1244 	kfree(vblank_work);
1245 }
1246 
1247 #endif
1248 
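/*
 * dm_handle_hpd_rx_offload_work() - Deferred HPD RX IRQ handling.
 *
 * Re-detects the sink and, when not in GPU reset, handles automated DP test
 * requests or link-loss recovery that were offloaded from the HPD RX
 * interrupt handler.
 */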
1249 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1250 {
1251 	struct hpd_rx_irq_offload_work *offload_work;
1252 	struct amdgpu_dm_connector *aconnector;
1253 	struct dc_link *dc_link;
1254 	struct amdgpu_device *adev;
1255 	enum dc_connection_type new_connection_type = dc_connection_none;
1256 	unsigned long flags;
1257 
1258 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1259 	aconnector = offload_work->offload_wq->aconnector;
1260 
1261 	if (!aconnector) {
1262 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1263 		goto skip;
1264 	}
1265 
1266 	adev = drm_to_adev(aconnector->base.dev);
1267 	dc_link = aconnector->dc_link;
1268 
1269 	mutex_lock(&aconnector->hpd_lock);
1270 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1271 		DRM_ERROR("KMS: Failed to detect connector\n");
1272 	mutex_unlock(&aconnector->hpd_lock);
1273 
1274 	if (new_connection_type == dc_connection_none)
1275 		goto skip;
1276 
1277 	if (amdgpu_in_reset(adev))
1278 		goto skip;
1279 
1280 	mutex_lock(&adev->dm.dc_lock);
1281 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1282 		dc_link_dp_handle_automated_test(dc_link);
1283 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1284 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1285 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1286 		dc_link_dp_handle_link_loss(dc_link);
1287 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1288 		offload_work->offload_wq->is_handling_link_loss = false;
1289 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1290 	}
1291 	mutex_unlock(&adev->dm.dc_lock);
1292 
1293 skip:
1294 	kfree(offload_work);
1295 
1296 }
1297 
1298 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1299 {
1300 	int max_caps = dc->caps.max_links;
1301 	int i = 0;
1302 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1303 
1304 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1305 
1306 	if (!hpd_rx_offload_wq)
1307 		return NULL;
1308 
1309 
1310 	for (i = 0; i < max_caps; i++) {
1311 		hpd_rx_offload_wq[i].wq =
1312 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1313 
1314 		if (hpd_rx_offload_wq[i].wq == NULL) {
1315 			DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
1316 			return NULL;
1317 		}
1318 
1319 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1320 	}
1321 
1322 	return hpd_rx_offload_wq;
1323 }
1324 
1325 struct amdgpu_stutter_quirk {
1326 	u16 chip_vendor;
1327 	u16 chip_device;
1328 	u16 subsys_vendor;
1329 	u16 subsys_device;
1330 	u8 revision;
1331 };
1332 
1333 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1334 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1335 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1336 	{ 0, 0, 0, 0, 0 },
1337 };
1338 
1339 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1340 {
1341 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1342 
1343 	while (p && p->chip_device != 0) {
1344 		if (pdev->vendor == p->chip_vendor &&
1345 		    pdev->device == p->chip_device &&
1346 		    pdev->subsystem_vendor == p->subsys_vendor &&
1347 		    pdev->subsystem_device == p->subsys_device &&
1348 		    pdev->revision == p->revision) {
1349 			return true;
1350 		}
1351 		++p;
1352 	}
1353 	return false;
1354 }
1355 
1356 static int amdgpu_dm_init(struct amdgpu_device *adev)
1357 {
1358 	struct dc_init_data init_data;
1359 #ifdef CONFIG_DRM_AMD_DC_HDCP
1360 	struct dc_callback_init init_params;
1361 #endif
1362 	int r;
1363 
1364 	adev->dm.ddev = adev_to_drm(adev);
1365 	adev->dm.adev = adev;
1366 
1367 	/* Zero all the fields */
1368 	memset(&init_data, 0, sizeof(init_data));
1369 #ifdef CONFIG_DRM_AMD_DC_HDCP
1370 	memset(&init_params, 0, sizeof(init_params));
1371 #endif
1372 
1373 	mutex_init(&adev->dm.dc_lock);
1374 	mutex_init(&adev->dm.audio_lock);
1375 #if defined(CONFIG_DRM_AMD_DC_DCN)
1376 	spin_lock_init(&adev->dm.vblank_lock);
1377 #endif
1378 
1379 	if (amdgpu_dm_irq_init(adev)) {
1380 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1381 		goto error;
1382 	}
1383 
1384 	init_data.asic_id.chip_family = adev->family;
1385 
1386 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1387 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1388 	init_data.asic_id.chip_id = adev->pdev->device;
1389 
1390 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1391 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1392 	init_data.asic_id.atombios_base_address =
1393 		adev->mode_info.atom_context->bios;
1394 
1395 	init_data.driver = adev;
1396 
1397 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1398 
1399 	if (!adev->dm.cgs_device) {
1400 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1401 		goto error;
1402 	}
1403 
1404 	init_data.cgs_device = adev->dm.cgs_device;
1405 
1406 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1407 
1408 	switch (adev->asic_type) {
1409 	case CHIP_CARRIZO:
1410 	case CHIP_STONEY:
1411 		init_data.flags.gpu_vm_support = true;
1412 		break;
1413 	default:
1414 		switch (adev->ip_versions[DCE_HWIP][0]) {
1415 		case IP_VERSION(2, 1, 0):
1416 			init_data.flags.gpu_vm_support = true;
1417 			switch (adev->dm.dmcub_fw_version) {
1418 			case 0: /* development */
1419 			case 0x1: /* linux-firmware.git hash 6d9f399 */
1420 			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1421 				init_data.flags.disable_dmcu = false;
1422 				break;
1423 			default:
1424 				init_data.flags.disable_dmcu = true;
1425 			}
1426 			break;
1427 		case IP_VERSION(1, 0, 0):
1428 		case IP_VERSION(1, 0, 1):
1429 		case IP_VERSION(3, 0, 1):
1430 		case IP_VERSION(3, 1, 2):
1431 		case IP_VERSION(3, 1, 3):
1432 			init_data.flags.gpu_vm_support = true;
1433 			break;
1434 		case IP_VERSION(2, 0, 3):
1435 			init_data.flags.disable_dmcu = true;
1436 			break;
1437 		default:
1438 			break;
1439 		}
1440 		break;
1441 	}
1442 
1443 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1444 		init_data.flags.fbc_support = true;
1445 
1446 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1447 		init_data.flags.multi_mon_pp_mclk_switch = true;
1448 
1449 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1450 		init_data.flags.disable_fractional_pwm = true;
1451 
1452 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1453 		init_data.flags.edp_no_power_sequencing = true;
1454 
1455 	init_data.flags.power_down_display_on_boot = true;
1456 
1457 	INIT_LIST_HEAD(&adev->dm.da_list);
1458 	/* Display Core create. */
1459 	adev->dm.dc = dc_create(&init_data);
1460 
1461 	if (adev->dm.dc) {
1462 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1463 	} else {
1464 		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1465 		goto error;
1466 	}
1467 
1468 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1469 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1470 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1471 	}
1472 
1473 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1474 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1475 	if (dm_should_disable_stutter(adev->pdev))
1476 		adev->dm.dc->debug.disable_stutter = true;
1477 
1478 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1479 		adev->dm.dc->debug.disable_stutter = true;
1480 
1481 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1482 		adev->dm.dc->debug.disable_dsc = true;
1483 		adev->dm.dc->debug.disable_dsc_edp = true;
1484 	}
1485 
1486 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1487 		adev->dm.dc->debug.disable_clock_gate = true;
1488 
1489 	r = dm_dmub_hw_init(adev);
1490 	if (r) {
1491 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1492 		goto error;
1493 	}
1494 
1495 	dc_hardware_init(adev->dm.dc);
1496 
1497 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1498 	if (!adev->dm.hpd_rx_offload_wq) {
1499 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1500 		goto error;
1501 	}
1502 
1503 #if defined(CONFIG_DRM_AMD_DC_DCN)
1504 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1505 		struct dc_phy_addr_space_config pa_config;
1506 
1507 		mmhub_read_system_context(adev, &pa_config);
1508 
1509 		// Call the DC init_memory func
1510 		dc_setup_system_context(adev->dm.dc, &pa_config);
1511 	}
1512 #endif
1513 
1514 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1515 	if (!adev->dm.freesync_module) {
1516 		DRM_ERROR(
1517 		"amdgpu: failed to initialize freesync_module.\n");
1518 	} else
1519 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1520 				adev->dm.freesync_module);
1521 
1522 	amdgpu_dm_init_color_mod();
1523 
1524 #if defined(CONFIG_DRM_AMD_DC_DCN)
1525 	if (adev->dm.dc->caps.max_links > 0) {
1526 		adev->dm.vblank_control_workqueue =
1527 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1528 		if (!adev->dm.vblank_control_workqueue)
1529 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1530 	}
1531 #endif
1532 
1533 #ifdef CONFIG_DRM_AMD_DC_HDCP
1534 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1535 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1536 
1537 		if (!adev->dm.hdcp_workqueue)
1538 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1539 		else
1540 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1541 
1542 		dc_init_callbacks(adev->dm.dc, &init_params);
1543 	}
1544 #endif
1545 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1546 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1547 #endif
1548 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1549 		init_completion(&adev->dm.dmub_aux_transfer_done);
1550 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1551 		if (!adev->dm.dmub_notify) {
1552 			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1553 			goto error;
1554 		}
1555 
1556 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1557 		if (!adev->dm.delayed_hpd_wq) {
1558 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1559 			goto error;
1560 		}
1561 
1562 		amdgpu_dm_outbox_init(adev);
1563 #if defined(CONFIG_DRM_AMD_DC_DCN)
1564 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1565 			dmub_aux_setconfig_callback, false)) {
1566 			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1567 			goto error;
1568 		}
1569 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1570 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1571 			goto error;
1572 		}
1573 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1574 			DRM_ERROR("amdgpu: failed to register dmub hpd_irq callback");
1575 			goto error;
1576 		}
1577 #endif /* CONFIG_DRM_AMD_DC_DCN */
1578 	}
1579 
1580 	if (amdgpu_dm_initialize_drm_device(adev)) {
1581 		DRM_ERROR(
1582 		"amdgpu: failed to initialize sw for display support.\n");
1583 		goto error;
1584 	}
1585 
1586 	/* create fake encoders for MST */
1587 	dm_dp_create_fake_mst_encoders(adev);
1588 
1589 	/* TODO: Add_display_info? */
1590 
1591 	/* TODO use dynamic cursor width */
1592 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1593 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1594 
1595 	/* Disable vblank IRQs aggressively for power-saving */
1596 	adev_to_drm(adev)->vblank_disable_immediate = true;
1597 
1598 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1599 		DRM_ERROR(
1600 		"amdgpu: failed to initialize drm vblank support.\n");
1601 		goto error;
1602 	}
1603 
1604 
1605 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1606 
1607 	return 0;
1608 error:
1609 	amdgpu_dm_fini(adev);
1610 
1611 	return -EINVAL;
1612 }
1613 
1614 static int amdgpu_dm_early_fini(void *handle)
1615 {
1616 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1617 
1618 	amdgpu_dm_audio_fini(adev);
1619 
1620 	return 0;
1621 }
1622 
1623 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1624 {
1625 	int i;
1626 
1627 #if defined(CONFIG_DRM_AMD_DC_DCN)
1628 	if (adev->dm.vblank_control_workqueue) {
1629 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1630 		adev->dm.vblank_control_workqueue = NULL;
1631 	}
1632 #endif
1633 
1634 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1635 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1636 	}
1637 
1638 	amdgpu_dm_destroy_drm_device(&adev->dm);
1639 
1640 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1641 	if (adev->dm.crc_rd_wrk) {
1642 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1643 		kfree(adev->dm.crc_rd_wrk);
1644 		adev->dm.crc_rd_wrk = NULL;
1645 	}
1646 #endif
1647 #ifdef CONFIG_DRM_AMD_DC_HDCP
1648 	if (adev->dm.hdcp_workqueue) {
1649 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1650 		adev->dm.hdcp_workqueue = NULL;
1651 	}
1652 
1653 	if (adev->dm.dc)
1654 		dc_deinit_callbacks(adev->dm.dc);
1655 #endif
1656 
	if (adev->dm.dc)
1657 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1658 
1659 	if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1660 		kfree(adev->dm.dmub_notify);
1661 		adev->dm.dmub_notify = NULL;
1662 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1663 		adev->dm.delayed_hpd_wq = NULL;
1664 	}
1665 
1666 	if (adev->dm.dmub_bo)
1667 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1668 				      &adev->dm.dmub_bo_gpu_addr,
1669 				      &adev->dm.dmub_bo_cpu_addr);
1670 
1671 	if (adev->dm.hpd_rx_offload_wq) {
1672 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1673 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1674 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1675 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1676 			}
1677 		}
1678 
1679 		kfree(adev->dm.hpd_rx_offload_wq);
1680 		adev->dm.hpd_rx_offload_wq = NULL;
1681 	}
1682 
1683 	/* DC Destroy TODO: Replace destroy DAL */
1684 	if (adev->dm.dc)
1685 		dc_destroy(&adev->dm.dc);
1686 	/*
1687 	 * TODO: pageflip, vlank interrupt
1688 	 *
1689 	 * amdgpu_dm_irq_fini(adev);
1690 	 */
1691 
1692 	if (adev->dm.cgs_device) {
1693 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1694 		adev->dm.cgs_device = NULL;
1695 	}
1696 	if (adev->dm.freesync_module) {
1697 		mod_freesync_destroy(adev->dm.freesync_module);
1698 		adev->dm.freesync_module = NULL;
1699 	}
1700 
1701 	mutex_destroy(&adev->dm.audio_lock);
1702 	mutex_destroy(&adev->dm.dc_lock);
1705 }
1706 
1707 static int load_dmcu_fw(struct amdgpu_device *adev)
1708 {
1709 	const char *fw_name_dmcu = NULL;
1710 	int r;
1711 	const struct dmcu_firmware_header_v1_0 *hdr;
1712 
1713 	switch (adev->asic_type) {
1714 #if defined(CONFIG_DRM_AMD_DC_SI)
1715 	case CHIP_TAHITI:
1716 	case CHIP_PITCAIRN:
1717 	case CHIP_VERDE:
1718 	case CHIP_OLAND:
1719 #endif
1720 	case CHIP_BONAIRE:
1721 	case CHIP_HAWAII:
1722 	case CHIP_KAVERI:
1723 	case CHIP_KABINI:
1724 	case CHIP_MULLINS:
1725 	case CHIP_TONGA:
1726 	case CHIP_FIJI:
1727 	case CHIP_CARRIZO:
1728 	case CHIP_STONEY:
1729 	case CHIP_POLARIS11:
1730 	case CHIP_POLARIS10:
1731 	case CHIP_POLARIS12:
1732 	case CHIP_VEGAM:
1733 	case CHIP_VEGA10:
1734 	case CHIP_VEGA12:
1735 	case CHIP_VEGA20:
1736 		return 0;
1737 	case CHIP_NAVI12:
1738 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1739 		break;
1740 	case CHIP_RAVEN:
1741 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1742 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1743 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1744 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1745 		else
1746 			return 0;
1747 		break;
1748 	default:
1749 		switch (adev->ip_versions[DCE_HWIP][0]) {
1750 		case IP_VERSION(2, 0, 2):
1751 		case IP_VERSION(2, 0, 3):
1752 		case IP_VERSION(2, 0, 0):
1753 		case IP_VERSION(2, 1, 0):
1754 		case IP_VERSION(3, 0, 0):
1755 		case IP_VERSION(3, 0, 2):
1756 		case IP_VERSION(3, 0, 3):
1757 		case IP_VERSION(3, 0, 1):
1758 		case IP_VERSION(3, 1, 2):
1759 		case IP_VERSION(3, 1, 3):
1760 			return 0;
1761 		default:
1762 			break;
1763 		}
1764 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1765 		return -EINVAL;
1766 	}
1767 
1768 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1769 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1770 		return 0;
1771 	}
1772 
1773 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1774 	if (r == -ENOENT) {
1775 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1776 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1777 		adev->dm.fw_dmcu = NULL;
1778 		return 0;
1779 	}
1780 	if (r) {
1781 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1782 			fw_name_dmcu);
1783 		return r;
1784 	}
1785 
1786 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1787 	if (r) {
1788 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1789 			fw_name_dmcu);
1790 		release_firmware(adev->dm.fw_dmcu);
1791 		adev->dm.fw_dmcu = NULL;
1792 		return r;
1793 	}
1794 
1795 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
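	/*
	 * Register the DMCU image with the PSP loader, split into the ERAM
	 * (program) region and the interrupt-vector (INTV) region described
	 * by the firmware header.
	 */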
1796 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1797 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1798 	adev->firmware.fw_size +=
1799 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1800 
1801 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1802 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1803 	adev->firmware.fw_size +=
1804 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1805 
1806 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1807 
1808 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1809 
1810 	return 0;
1811 }
1812 
1813 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1814 {
1815 	struct amdgpu_device *adev = ctx;
1816 
1817 	return dm_read_reg(adev->dm.dc->ctx, address);
1818 }
1819 
1820 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1821 				     uint32_t value)
1822 {
1823 	struct amdgpu_device *adev = ctx;
1824 
1825 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1826 }
1827 
1828 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1829 {
1830 	struct dmub_srv_create_params create_params;
1831 	struct dmub_srv_region_params region_params;
1832 	struct dmub_srv_region_info region_info;
1833 	struct dmub_srv_fb_params fb_params;
1834 	struct dmub_srv_fb_info *fb_info;
1835 	struct dmub_srv *dmub_srv;
1836 	const struct dmcub_firmware_header_v1_0 *hdr;
1837 	const char *fw_name_dmub;
1838 	enum dmub_asic dmub_asic;
1839 	enum dmub_status status;
1840 	int r;
1841 
1842 	switch (adev->ip_versions[DCE_HWIP][0]) {
1843 	case IP_VERSION(2, 1, 0):
1844 		dmub_asic = DMUB_ASIC_DCN21;
1845 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1846 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1847 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1848 		break;
1849 	case IP_VERSION(3, 0, 0):
1850 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1851 			dmub_asic = DMUB_ASIC_DCN30;
1852 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1853 		} else {
1854 			dmub_asic = DMUB_ASIC_DCN30;
1855 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1856 		}
1857 		break;
1858 	case IP_VERSION(3, 0, 1):
1859 		dmub_asic = DMUB_ASIC_DCN301;
1860 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1861 		break;
1862 	case IP_VERSION(3, 0, 2):
1863 		dmub_asic = DMUB_ASIC_DCN302;
1864 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1865 		break;
1866 	case IP_VERSION(3, 0, 3):
1867 		dmub_asic = DMUB_ASIC_DCN303;
1868 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1869 		break;
1870 	case IP_VERSION(3, 1, 2):
1871 	case IP_VERSION(3, 1, 3):
1872 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1873 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1874 		break;
1875 
1876 	default:
1877 		/* ASIC doesn't support DMUB. */
1878 		return 0;
1879 	}
1880 
1881 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1882 	if (r) {
1883 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1884 		return 0;
1885 	}
1886 
1887 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1888 	if (r) {
1889 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1890 		return 0;
1891 	}
1892 
1893 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1894 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1895 
1896 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1897 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1898 			AMDGPU_UCODE_ID_DMCUB;
1899 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1900 			adev->dm.dmub_fw;
1901 		adev->firmware.fw_size +=
1902 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1903 
1904 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1905 			 adev->dm.dmcub_fw_version);
1906 	}
1907 
1908 
1909 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1910 	dmub_srv = adev->dm.dmub_srv;
1911 
1912 	if (!dmub_srv) {
1913 		DRM_ERROR("Failed to allocate DMUB service!\n");
1914 		return -ENOMEM;
1915 	}
1916 
1917 	memset(&create_params, 0, sizeof(create_params));
1918 	create_params.user_ctx = adev;
1919 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1920 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1921 	create_params.asic = dmub_asic;
1922 
1923 	/* Create the DMUB service. */
1924 	status = dmub_srv_create(dmub_srv, &create_params);
1925 	if (status != DMUB_STATUS_OK) {
1926 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1927 		return -EINVAL;
1928 	}
1929 
1930 	/* Calculate the size of all the regions for the DMUB service. */
1931 	memset(&region_params, 0, sizeof(region_params));
1932 
1933 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1934 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1935 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1936 	region_params.vbios_size = adev->bios_size;
1937 	region_params.fw_bss_data = region_params.bss_data_size ?
1938 		adev->dm.dmub_fw->data +
1939 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1940 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1941 	region_params.fw_inst_const =
1942 		adev->dm.dmub_fw->data +
1943 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1944 		PSP_HEADER_BYTES;
1945 
1946 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1947 					   &region_info);
1948 
1949 	if (status != DMUB_STATUS_OK) {
1950 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1951 		return -EINVAL;
1952 	}
1953 
1954 	/*
1955 	 * Allocate a framebuffer based on the total size of all the regions.
1956 	 * TODO: Move this into GART.
1957 	 */
1958 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1959 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1960 				    &adev->dm.dmub_bo_gpu_addr,
1961 				    &adev->dm.dmub_bo_cpu_addr);
1962 	if (r)
1963 		return r;
1964 
1965 	/* Rebase the regions on the framebuffer address. */
1966 	memset(&fb_params, 0, sizeof(fb_params));
1967 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1968 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1969 	fb_params.region_info = &region_info;
1970 
1971 	adev->dm.dmub_fb_info =
1972 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1973 	fb_info = adev->dm.dmub_fb_info;
1974 
1975 	if (!fb_info) {
1976 		DRM_ERROR(
1977 			"Failed to allocate framebuffer info for DMUB service!\n");
1978 		return -ENOMEM;
1979 	}
1980 
1981 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1982 	if (status != DMUB_STATUS_OK) {
1983 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1984 		return -EINVAL;
1985 	}
1986 
1987 	return 0;
1988 }
1989 
1990 static int dm_sw_init(void *handle)
1991 {
1992 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1993 	int r;
1994 
1995 	r = dm_dmub_sw_init(adev);
1996 	if (r)
1997 		return r;
1998 
1999 	return load_dmcu_fw(adev);
2000 }
2001 
2002 static int dm_sw_fini(void *handle)
2003 {
2004 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2005 
2006 	kfree(adev->dm.dmub_fb_info);
2007 	adev->dm.dmub_fb_info = NULL;
2008 
2009 	if (adev->dm.dmub_srv) {
2010 		dmub_srv_destroy(adev->dm.dmub_srv);
2011 		adev->dm.dmub_srv = NULL;
2012 	}
2013 
2014 	release_firmware(adev->dm.dmub_fw);
2015 	adev->dm.dmub_fw = NULL;
2016 
2017 	release_firmware(adev->dm.fw_dmcu);
2018 	adev->dm.fw_dmcu = NULL;
2019 
2020 	return 0;
2021 }
2022 
2023 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2024 {
2025 	struct amdgpu_dm_connector *aconnector;
2026 	struct drm_connector *connector;
2027 	struct drm_connector_list_iter iter;
2028 	int ret = 0;
2029 
2030 	drm_connector_list_iter_begin(dev, &iter);
2031 	drm_for_each_connector_iter(connector, &iter) {
2032 		aconnector = to_amdgpu_dm_connector(connector);
2033 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2034 		    aconnector->mst_mgr.aux) {
2035 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2036 					 aconnector,
2037 					 aconnector->base.base.id);
2038 
2039 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2040 			if (ret < 0) {
2041 				DRM_ERROR("DM_MST: Failed to start MST\n");
2042 				aconnector->dc_link->type =
2043 					dc_connection_single;
2044 				break;
2045 			}
2046 		}
2047 	}
2048 	drm_connector_list_iter_end(&iter);
2049 
2050 	return ret;
2051 }
2052 
2053 static int dm_late_init(void *handle)
2054 {
2055 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2056 
2057 	struct dmcu_iram_parameters params;
2058 	unsigned int linear_lut[16];
2059 	int i;
2060 	struct dmcu *dmcu = NULL;
2061 
2062 	dmcu = adev->dm.dc->res_pool->dmcu;
2063 
2064 	for (i = 0; i < 16; i++)
2065 		linear_lut[i] = 0xFFFF * i / 15;
2066 
2067 	params.set = 0;
2068 	params.backlight_ramping_override = false;
2069 	params.backlight_ramping_start = 0xCCCC;
2070 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2071 	params.backlight_lut_array_size = 16;
2072 	params.backlight_lut_array = linear_lut;
2073 
2074 	/* Min backlight level after ABM reduction; don't allow below 1%.
2075 	 * 0xFFFF x 0.01 = 0x28F
2076 	 */
2077 	params.min_abm_backlight = 0x28F;
2078 	/* In the case where ABM is implemented on dmcub,
2079 	 * the dmcu object will be null.
2080 	 * ABM 2.4 and up are implemented on dmcub.
2081 	 */
2082 	if (dmcu) {
2083 		if (!dmcu_load_iram(dmcu, params))
2084 			return -EINVAL;
2085 	} else if (adev->dm.dc->ctx->dmub_srv) {
2086 		struct dc_link *edp_links[MAX_NUM_EDP];
2087 		int edp_num;
2088 
2089 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2090 		for (i = 0; i < edp_num; i++) {
2091 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2092 				return -EINVAL;
2093 		}
2094 	}
2095 
2096 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2097 }
2098 
2099 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2100 {
2101 	struct amdgpu_dm_connector *aconnector;
2102 	struct drm_connector *connector;
2103 	struct drm_connector_list_iter iter;
2104 	struct drm_dp_mst_topology_mgr *mgr;
2105 	int ret;
2106 	bool need_hotplug = false;
2107 
2108 	drm_connector_list_iter_begin(dev, &iter);
2109 	drm_for_each_connector_iter(connector, &iter) {
2110 		aconnector = to_amdgpu_dm_connector(connector);
2111 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2112 		    aconnector->mst_port)
2113 			continue;
2114 
2115 		mgr = &aconnector->mst_mgr;
2116 
2117 		if (suspend) {
2118 			drm_dp_mst_topology_mgr_suspend(mgr);
2119 		} else {
2120 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2121 			if (ret < 0) {
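				/*
				 * Topology resume failed: tear down the MST
				 * session and flag a hotplug event so that
				 * userspace re-probes the connector.
				 */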
2122 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2123 				need_hotplug = true;
2124 			}
2125 		}
2126 	}
2127 	drm_connector_list_iter_end(&iter);
2128 
2129 	if (need_hotplug)
2130 		drm_kms_helper_hotplug_event(dev);
2131 }
2132 
2133 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2134 {
2135 	struct smu_context *smu = &adev->smu;
2136 	int ret = 0;
2137 
2138 	if (!is_support_sw_smu(adev))
2139 		return 0;
2140 
2141 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2142 	 * depends on the Windows driver dc implementation.
2143 	 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
2144 	 * should be passed to smu during boot up and resume from s3.
2145 	 * boot up: dc calculates dcn watermark clock settings within dc_create,
2146 	 * dcn20_resource_construct
2147 	 * then calls the pplib functions below to pass the settings to smu:
2148 	 * smu_set_watermarks_for_clock_ranges
2149 	 * smu_set_watermarks_table
2150 	 * navi10_set_watermarks_table
2151 	 * smu_write_watermarks_table
2152 	 *
2153 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
2154 	 * dc has implemented a different flow for the Windows driver:
2155 	 * dc_hardware_init / dc_set_power_state
2156 	 * dcn10_init_hw
2157 	 * notify_wm_ranges
2158 	 * set_wm_ranges
2159 	 * -- Linux
2160 	 * smu_set_watermarks_for_clock_ranges
2161 	 * renoir_set_watermarks_table
2162 	 * smu_write_watermarks_table
2163 	 *
2164 	 * For Linux,
2165 	 * dc_hardware_init -> amdgpu_dm_init
2166 	 * dc_set_power_state --> dm_resume
2167 	 *
2168 	 * Therefore, this function applies to navi10/12/14 but not Renoir.
2169 	 */
2171 	switch (adev->ip_versions[DCE_HWIP][0]) {
2172 	case IP_VERSION(2, 0, 2):
2173 	case IP_VERSION(2, 0, 0):
2174 		break;
2175 	default:
2176 		return 0;
2177 	}
2178 
2179 	ret = smu_write_watermarks_table(smu);
2180 	if (ret) {
2181 		DRM_ERROR("Failed to update WMTABLE!\n");
2182 		return ret;
2183 	}
2184 
2185 	return 0;
2186 }
2187 
2188 /**
2189  * dm_hw_init() - Initialize DC device
2190  * @handle: The base driver device containing the amdgpu_dm device.
2191  *
2192  * Initialize the &struct amdgpu_display_manager device. This involves calling
2193  * the initializers of each DM component, then populating the struct with them.
2194  *
2195  * Although the function implies hardware initialization, both hardware and
2196  * software are initialized here. Splitting them out to their relevant init
2197  * hooks is a future TODO item.
2198  *
2199  * Some notable things that are initialized here:
2200  *
2201  * - Display Core, both software and hardware
2202  * - DC modules that we need (freesync and color management)
2203  * - DRM software states
2204  * - Interrupt sources and handlers
2205  * - Vblank support
2206  * - Debug FS entries, if enabled
2207  */
2208 static int dm_hw_init(void *handle)
2209 {
2210 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2211 	/* Create DAL display manager */
2212 	amdgpu_dm_init(adev);
2213 	amdgpu_dm_hpd_init(adev);
2214 
2215 	return 0;
2216 }
2217 
2218 /**
2219  * dm_hw_fini() - Teardown DC device
2220  * @handle: The base driver device containing the amdgpu_dm device.
2221  *
2222  * Teardown components within &struct amdgpu_display_manager that require
2223  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2224  * were loaded. Also flush IRQ workqueues and disable them.
2225  */
2226 static int dm_hw_fini(void *handle)
2227 {
2228 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2229 
2230 	amdgpu_dm_hpd_fini(adev);
2231 
2232 	amdgpu_dm_irq_fini(adev);
2233 	amdgpu_dm_fini(adev);
2234 	return 0;
2235 }
2236 
2237 
2238 static int dm_enable_vblank(struct drm_crtc *crtc);
2239 static void dm_disable_vblank(struct drm_crtc *crtc);
2240 
2241 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2242 				 struct dc_state *state, bool enable)
2243 {
2244 	enum dc_irq_source irq_source;
2245 	struct amdgpu_crtc *acrtc;
2246 	int rc = -EBUSY;
2247 	int i = 0;
2248 
2249 	for (i = 0; i < state->stream_count; i++) {
2250 		acrtc = get_crtc_by_otg_inst(
2251 				adev, state->stream_status[i].primary_otg_inst);
2252 
2253 		if (acrtc && state->stream_status[i].plane_count != 0) {
2254 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2255 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2256 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2257 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2258 			if (rc)
2259 				DRM_WARN("Failed to %s pflip interrupts\n",
2260 					 enable ? "enable" : "disable");
2261 
2262 			if (enable) {
2263 				rc = dm_enable_vblank(&acrtc->base);
2264 				if (rc)
2265 					DRM_WARN("Failed to enable vblank interrupts\n");
2266 			} else {
2267 				dm_disable_vblank(&acrtc->base);
2268 			}
2269 
2270 		}
2271 	}
2272 
2273 }
2274 
2275 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2276 {
2277 	struct dc_state *context = NULL;
2278 	enum dc_status res = DC_ERROR_UNEXPECTED;
2279 	int i;
2280 	struct dc_stream_state *del_streams[MAX_PIPES];
2281 	int del_streams_count = 0;
2282 
2283 	memset(del_streams, 0, sizeof(del_streams));
2284 
2285 	context = dc_create_state(dc);
2286 	if (context == NULL)
2287 		goto context_alloc_fail;
2288 
2289 	dc_resource_state_copy_construct_current(dc, context);
2290 
2291 	/* First remove all streams from the context */
2292 	for (i = 0; i < context->stream_count; i++) {
2293 		struct dc_stream_state *stream = context->streams[i];
2294 
2295 		del_streams[del_streams_count++] = stream;
2296 	}
2297 
2298 	/* Remove all planes for removed streams and then remove the streams */
2299 	for (i = 0; i < del_streams_count; i++) {
2300 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2301 			res = DC_FAIL_DETACH_SURFACES;
2302 			goto fail;
2303 		}
2304 
2305 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2306 		if (res != DC_OK)
2307 			goto fail;
2308 	}
2309 
2310 
2311 	res = dc_validate_global_state(dc, context, false);
2312 
2313 	if (res != DC_OK) {
2314 		DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
2315 		goto fail;
2316 	}
2317 
2318 	res = dc_commit_state(dc, context);
2319 
2320 fail:
2321 	dc_release_state(context);
2322 
2323 context_alloc_fail:
2324 	return res;
2325 }
2326 
2327 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2328 {
2329 	int i;
2330 
2331 	if (dm->hpd_rx_offload_wq) {
2332 		for (i = 0; i < dm->dc->caps.max_links; i++)
2333 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2334 	}
2335 }
2336 
2337 static int dm_suspend(void *handle)
2338 {
2339 	struct amdgpu_device *adev = handle;
2340 	struct amdgpu_display_manager *dm = &adev->dm;
2341 	int ret = 0;
2342 
2343 	if (amdgpu_in_reset(adev)) {
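		/*
		 * Note: dc_lock is intentionally left held here; the matching
		 * unlock happens in dm_resume() once the cached DC state has
		 * been restored after the GPU reset.
		 */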
2344 		mutex_lock(&dm->dc_lock);
2345 
2346 #if defined(CONFIG_DRM_AMD_DC_DCN)
2347 		dc_allow_idle_optimizations(adev->dm.dc, false);
2348 #endif
2349 
2350 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2351 
2352 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2353 
2354 		amdgpu_dm_commit_zero_streams(dm->dc);
2355 
2356 		amdgpu_dm_irq_suspend(adev);
2357 
2358 		hpd_rx_irq_work_suspend(dm);
2359 
2360 		return ret;
2361 	}
2362 
2363 	WARN_ON(adev->dm.cached_state);
2364 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2365 
2366 	s3_handle_mst(adev_to_drm(adev), true);
2367 
2368 	amdgpu_dm_irq_suspend(adev);
2369 
2370 	hpd_rx_irq_work_suspend(dm);
2371 
2372 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2373 
2374 	return 0;
2375 }
2376 
2377 static struct amdgpu_dm_connector *
2378 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2379 					     struct drm_crtc *crtc)
2380 {
2381 	uint32_t i;
2382 	struct drm_connector_state *new_con_state;
2383 	struct drm_connector *connector;
2384 	struct drm_crtc *crtc_from_state;
2385 
2386 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2387 		crtc_from_state = new_con_state->crtc;
2388 
2389 		if (crtc_from_state == crtc)
2390 			return to_amdgpu_dm_connector(connector);
2391 	}
2392 
2393 	return NULL;
2394 }
2395 
2396 static void emulated_link_detect(struct dc_link *link)
2397 {
2398 	struct dc_sink_init_data sink_init_data = { 0 };
2399 	struct display_sink_capability sink_caps = { 0 };
2400 	enum dc_edid_status edid_status;
2401 	struct dc_context *dc_ctx = link->ctx;
2402 	struct dc_sink *sink = NULL;
2403 	struct dc_sink *prev_sink = NULL;
2404 
2405 	link->type = dc_connection_none;
2406 	prev_sink = link->local_sink;
2407 
2408 	if (prev_sink)
2409 		dc_sink_release(prev_sink);
2410 
2411 	switch (link->connector_signal) {
2412 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2413 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2414 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2415 		break;
2416 	}
2417 
2418 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2419 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2420 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2421 		break;
2422 	}
2423 
2424 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2425 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2426 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2427 		break;
2428 	}
2429 
2430 	case SIGNAL_TYPE_LVDS: {
2431 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2432 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2433 		break;
2434 	}
2435 
2436 	case SIGNAL_TYPE_EDP: {
2437 		sink_caps.transaction_type =
2438 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2439 		sink_caps.signal = SIGNAL_TYPE_EDP;
2440 		break;
2441 	}
2442 
2443 	case SIGNAL_TYPE_DISPLAY_PORT: {
2444 		sink_caps.transaction_type =
2445 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
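		/* Emulated DP sinks are reported with a virtual signal type. */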
2446 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2447 		break;
2448 	}
2449 
2450 	default:
2451 		DC_ERROR("Invalid connector type! signal:%d\n",
2452 			link->connector_signal);
2453 		return;
2454 	}
2455 
2456 	sink_init_data.link = link;
2457 	sink_init_data.sink_signal = sink_caps.signal;
2458 
2459 	sink = dc_sink_create(&sink_init_data);
2460 	if (!sink) {
2461 		DC_ERROR("Failed to create sink!\n");
2462 		return;
2463 	}
2464 
2465 	/* dc_sink_create returns a new reference */
2466 	link->local_sink = sink;
2467 
2468 	edid_status = dm_helpers_read_local_edid(
2469 			link->ctx,
2470 			link,
2471 			sink);
2472 
2473 	if (edid_status != EDID_OK)
2474 		DC_ERROR("Failed to read EDID\n");
2475 
2476 }
2477 
2478 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2479 				     struct amdgpu_display_manager *dm)
2480 {
2481 	struct {
2482 		struct dc_surface_update surface_updates[MAX_SURFACES];
2483 		struct dc_plane_info plane_infos[MAX_SURFACES];
2484 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2485 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2486 		struct dc_stream_update stream_update;
2487 	} * bundle;
2488 	} *bundle;
2489 
2490 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
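	/* The update bundle is too large for the stack, so allocate it from the heap. */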
2491 
2492 	if (!bundle) {
2493 		dm_error("Failed to allocate update bundle\n");
2494 		goto cleanup;
2495 	}
2496 
2497 	for (k = 0; k < dc_state->stream_count; k++) {
2498 		bundle->stream_update.stream = dc_state->streams[k];
2499 
2500 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2501 			bundle->surface_updates[m].surface =
2502 				dc_state->stream_status->plane_states[m];
2503 			bundle->surface_updates[m].surface->force_full_update =
2504 				true;
2505 		}
2506 		dc_commit_updates_for_stream(
2507 			dm->dc, bundle->surface_updates,
2508 			dc_state->stream_status->plane_count,
2509 			dc_state->streams[k], &bundle->stream_update, dc_state);
2510 	}
2511 
2512 cleanup:
2513 	kfree(bundle);
2516 }
2517 
2518 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2519 {
2520 	struct dc_stream_state *stream_state;
2521 	struct amdgpu_dm_connector *aconnector = link->priv;
2522 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2523 	struct dc_stream_update stream_update;
2524 	bool dpms_off = true;
2525 
2526 	memset(&stream_update, 0, sizeof(stream_update));
2527 	stream_update.dpms_off = &dpms_off;
2528 
2529 	mutex_lock(&adev->dm.dc_lock);
2530 	stream_state = dc_stream_find_from_link(link);
2531 
2532 	if (stream_state == NULL) {
2533 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2534 		mutex_unlock(&adev->dm.dc_lock);
2535 		return;
2536 	}
2537 
2538 	stream_update.stream = stream_state;
2539 	acrtc_state->force_dpms_off = true;
2540 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2541 				     stream_state, &stream_update,
2542 				     stream_state->ctx->dc->current_state);
2543 	mutex_unlock(&adev->dm.dc_lock);
2544 }
2545 
2546 static int dm_resume(void *handle)
2547 {
2548 	struct amdgpu_device *adev = handle;
2549 	struct drm_device *ddev = adev_to_drm(adev);
2550 	struct amdgpu_display_manager *dm = &adev->dm;
2551 	struct amdgpu_dm_connector *aconnector;
2552 	struct drm_connector *connector;
2553 	struct drm_connector_list_iter iter;
2554 	struct drm_crtc *crtc;
2555 	struct drm_crtc_state *new_crtc_state;
2556 	struct dm_crtc_state *dm_new_crtc_state;
2557 	struct drm_plane *plane;
2558 	struct drm_plane_state *new_plane_state;
2559 	struct dm_plane_state *dm_new_plane_state;
2560 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2561 	enum dc_connection_type new_connection_type = dc_connection_none;
2562 	struct dc_state *dc_state;
2563 	int i, r, j;
2564 
2565 	if (amdgpu_in_reset(adev)) {
2566 		dc_state = dm->cached_dc_state;
2567 
2568 		r = dm_dmub_hw_init(adev);
2569 		if (r)
2570 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2571 
2572 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2573 		dc_resume(dm->dc);
2574 
2575 		amdgpu_dm_irq_resume_early(adev);
2576 
2577 		for (i = 0; i < dc_state->stream_count; i++) {
2578 			dc_state->streams[i]->mode_changed = true;
2579 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2580 				dc_state->stream_status->plane_states[j]->update_flags.raw
2581 					= 0xffffffff;
2582 			}
2583 		}
2584 #if defined(CONFIG_DRM_AMD_DC_DCN)
2585 		/*
2586 		 * Resource allocation happens for link encoders for newer ASICs in
2587 		 * dc_validate_global_state, so we need to revalidate it.
2588 		 *
2589 		 * This shouldn't fail (it passed once before), so warn if it does.
2590 		 */
2591 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2592 #endif
2593 
2594 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2595 
2596 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2597 
2598 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2599 
2600 		dc_release_state(dm->cached_dc_state);
2601 		dm->cached_dc_state = NULL;
2602 
2603 		amdgpu_dm_irq_resume_late(adev);
2604 
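		/* Release the dc_lock taken in dm_suspend()'s GPU-reset path. */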
2605 		mutex_unlock(&dm->dc_lock);
2606 
2607 		return 0;
2608 	}
2609 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2610 	dc_release_state(dm_state->context);
2611 	dm_state->context = dc_create_state(dm->dc);
2612 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2613 	dc_resource_state_construct(dm->dc, dm_state->context);
2614 
2615 	/* Before powering on DC we need to re-initialize DMUB. */
2616 	r = dm_dmub_hw_init(adev);
2617 	if (r)
2618 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2619 
2620 	/* power on hardware */
2621 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2622 
2623 	/* program HPD filter */
2624 	dc_resume(dm->dc);
2625 
2626 	/*
2627 	 * Enable HPD Rx IRQ early; this should be done before setting the mode,
2628 	 * as short pulse interrupts are used for MST.
2629 	 */
2630 	amdgpu_dm_irq_resume_early(adev);
2631 
2632 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2633 	s3_handle_mst(ddev, false);
2634 
2635 	/* Do detection */
2636 	drm_connector_list_iter_begin(ddev, &iter);
2637 	drm_for_each_connector_iter(connector, &iter) {
2638 		aconnector = to_amdgpu_dm_connector(connector);
2639 
2640 		/*
2641 		 * This is the case when traversing through already created
2642 		 * MST connectors; they should be skipped.
2643 		 */
2644 		if (aconnector->mst_port)
2645 			continue;
2646 
2647 		mutex_lock(&aconnector->hpd_lock);
2648 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2649 			DRM_ERROR("KMS: Failed to detect connector\n");
2650 
2651 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2652 			emulated_link_detect(aconnector->dc_link);
2653 		else
2654 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2655 
2656 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2657 			aconnector->fake_enable = false;
2658 
2659 		if (aconnector->dc_sink)
2660 			dc_sink_release(aconnector->dc_sink);
2661 		aconnector->dc_sink = NULL;
2662 		amdgpu_dm_update_connector_after_detect(aconnector);
2663 		mutex_unlock(&aconnector->hpd_lock);
2664 	}
2665 	drm_connector_list_iter_end(&iter);
2666 
2667 	/* Force mode set in atomic commit */
2668 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2669 		new_crtc_state->active_changed = true;
2670 
2671 	/*
2672 	 * atomic_check is expected to create the dc states. We need to release
2673 	 * them here, since they were duplicated as part of the suspend
2674 	 * procedure.
2675 	 */
2676 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2677 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2678 		if (dm_new_crtc_state->stream) {
2679 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2680 			dc_stream_release(dm_new_crtc_state->stream);
2681 			dm_new_crtc_state->stream = NULL;
2682 		}
2683 	}
2684 
2685 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2686 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2687 		if (dm_new_plane_state->dc_state) {
2688 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2689 			dc_plane_state_release(dm_new_plane_state->dc_state);
2690 			dm_new_plane_state->dc_state = NULL;
2691 		}
2692 	}
2693 
2694 	drm_atomic_helper_resume(ddev, dm->cached_state);
2695 
2696 	dm->cached_state = NULL;
2697 
2698 	amdgpu_dm_irq_resume_late(adev);
2699 
2700 	amdgpu_dm_smu_write_watermarks_table(adev);
2701 
2702 	return 0;
2703 }
2704 
2705 /**
2706  * DOC: DM Lifecycle
2707  *
2708  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2709  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2710  * the base driver's device list to be initialized and torn down accordingly.
2711  *
2712  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2713  */
2714 
2715 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2716 	.name = "dm",
2717 	.early_init = dm_early_init,
2718 	.late_init = dm_late_init,
2719 	.sw_init = dm_sw_init,
2720 	.sw_fini = dm_sw_fini,
2721 	.early_fini = amdgpu_dm_early_fini,
2722 	.hw_init = dm_hw_init,
2723 	.hw_fini = dm_hw_fini,
2724 	.suspend = dm_suspend,
2725 	.resume = dm_resume,
2726 	.is_idle = dm_is_idle,
2727 	.wait_for_idle = dm_wait_for_idle,
2728 	.check_soft_reset = dm_check_soft_reset,
2729 	.soft_reset = dm_soft_reset,
2730 	.set_clockgating_state = dm_set_clockgating_state,
2731 	.set_powergating_state = dm_set_powergating_state,
2732 };
2733 
2734 const struct amdgpu_ip_block_version dm_ip_block =
2735 {
2736 	.type = AMD_IP_BLOCK_TYPE_DCE,
2737 	.major = 1,
2738 	.minor = 0,
2739 	.rev = 0,
2740 	.funcs = &amdgpu_dm_funcs,
2741 };
2742 
2743 
2744 /**
2745  * DOC: atomic
2746  *
2747  * *WIP*
2748  */
2749 
2750 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2751 	.fb_create = amdgpu_display_user_framebuffer_create,
2752 	.get_format_info = amd_get_format_info,
2753 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2754 	.atomic_check = amdgpu_dm_atomic_check,
2755 	.atomic_commit = drm_atomic_helper_commit,
2756 };
2757 
2758 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2759 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2760 };
2761 
2762 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2763 {
2764 	u32 max_cll, min_cll, max, min, q, r;
2765 	struct amdgpu_dm_backlight_caps *caps;
2766 	struct amdgpu_display_manager *dm;
2767 	struct drm_connector *conn_base;
2768 	struct amdgpu_device *adev;
2769 	struct dc_link *link = NULL;
2770 	static const u8 pre_computed_values[] = {
2771 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2772 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2773 	int i;
2774 
2775 	if (!aconnector || !aconnector->dc_link)
2776 		return;
2777 
2778 	link = aconnector->dc_link;
2779 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2780 		return;
2781 
2782 	conn_base = &aconnector->base;
2783 	adev = drm_to_adev(conn_base->dev);
2784 	dm = &adev->dm;
2785 	for (i = 0; i < dm->num_of_edps; i++) {
2786 		if (link == dm->backlight_link[i])
2787 			break;
2788 	}
2789 	if (i >= dm->num_of_edps)
2790 		return;
2791 	caps = &dm->backlight_caps[i];
2792 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2793 	caps->aux_support = false;
2794 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2795 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2796 
2797 	if (caps->ext_caps->bits.oled == 1 /*||
2798 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2799 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2800 		caps->aux_support = true;
2801 
2802 	if (amdgpu_backlight == 0)
2803 		caps->aux_support = false;
2804 	else if (amdgpu_backlight == 1)
2805 		caps->aux_support = true;
2806 
2807 	/* From the specification (CTA-861-G), for calculating the maximum
2808 	 * luminance we need to use:
2809 	 *	Luminance = 50*2**(CV/32)
2810 	 * Where CV is a one-byte value.
2811 	 * Calculating this expression may need floating point precision;
2812 	 * to avoid this complexity level, we take advantage that CV is divided
2813 	 * by a constant. From Euclid's division algorithm, we know that CV
2814 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2815 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2816 	 * need to pre-compute the value of r/32. For pre-computing the values
2817 	 * we just used the following Ruby line:
2818 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2819 	 * The results of the above expressions can be verified at
2820 	 * pre_computed_values.
2821 	 */
2822 	q = max_cll >> 5;
2823 	r = max_cll % 32;
2824 	max = (1 << q) * pre_computed_values[r];
2825 
2826 	// min luminance: maxLum * (CV/255)^2 / 100
2827 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2828 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2829 
2830 	caps->aux_max_input_signal = max;
2831 	caps->aux_min_input_signal = min;
2832 }
2833 
2834 void amdgpu_dm_update_connector_after_detect(
2835 		struct amdgpu_dm_connector *aconnector)
2836 {
2837 	struct drm_connector *connector = &aconnector->base;
2838 	struct drm_device *dev = connector->dev;
2839 	struct dc_sink *sink;
2840 
2841 	/* MST handled by drm_mst framework */
2842 	if (aconnector->mst_mgr.mst_state)
2843 		return;
2844 
2845 	sink = aconnector->dc_link->local_sink;
2846 	if (sink)
2847 		dc_sink_retain(sink);
2848 
2849 	/*
2850 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2851 	 * the connector sink is set to either fake or physical sink depending on link status.
2852 	 * Skip if already done during boot.
2853 	 */
2854 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2855 			&& aconnector->dc_em_sink) {
2856 
2857 		/*
2858 		 * For S3 resume with headless, use the emulated sink (dc_em_sink)
2859 		 * to fake the stream because on resume connector->sink is set to NULL.
2860 		 */
2861 		mutex_lock(&dev->mode_config.mutex);
2862 
2863 		if (sink) {
2864 			if (aconnector->dc_sink) {
2865 				amdgpu_dm_update_freesync_caps(connector, NULL);
2866 				/*
2867 				 * retain and release below are used to
2868 				 * bump up the refcount for the sink because the link no longer
2869 				 * points to it after disconnect, so on the next crtc-to-connector
2870 				 * reshuffle by UMD we would otherwise hit an unwanted dc_sink release
2871 				 */
2872 				dc_sink_release(aconnector->dc_sink);
2873 			}
2874 			aconnector->dc_sink = sink;
2875 			dc_sink_retain(aconnector->dc_sink);
2876 			amdgpu_dm_update_freesync_caps(connector,
2877 					aconnector->edid);
2878 		} else {
2879 			amdgpu_dm_update_freesync_caps(connector, NULL);
2880 			if (!aconnector->dc_sink) {
2881 				aconnector->dc_sink = aconnector->dc_em_sink;
2882 				dc_sink_retain(aconnector->dc_sink);
2883 			}
2884 		}
2885 
2886 		mutex_unlock(&dev->mode_config.mutex);
2887 
2888 		if (sink)
2889 			dc_sink_release(sink);
2890 		return;
2891 	}
2892 
2893 	/*
2894 	 * TODO: temporary guard to look for a proper fix;
2895 	 * if this sink is an MST sink, we should not do anything.
2896 	 */
2897 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2898 		dc_sink_release(sink);
2899 		return;
2900 	}
2901 
2902 	if (aconnector->dc_sink == sink) {
2903 		/*
2904 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2905 		 * Do nothing!!
2906 		 */
2907 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2908 				aconnector->connector_id);
2909 		if (sink)
2910 			dc_sink_release(sink);
2911 		return;
2912 	}
2913 
2914 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2915 		aconnector->connector_id, aconnector->dc_sink, sink);
2916 
2917 	mutex_lock(&dev->mode_config.mutex);
2918 
2919 	/*
2920 	 * 1. Update status of the drm connector
2921 	 * 2. Send an event and let userspace tell us what to do
2922 	 */
2923 	if (sink) {
2924 		/*
2925 		 * TODO: check if we still need the S3 mode update workaround.
2926 		 * If yes, put it here.
2927 		 */
2928 		if (aconnector->dc_sink) {
2929 			amdgpu_dm_update_freesync_caps(connector, NULL);
2930 			dc_sink_release(aconnector->dc_sink);
2931 		}
2932 
2933 		aconnector->dc_sink = sink;
2934 		dc_sink_retain(aconnector->dc_sink);
2935 		if (sink->dc_edid.length == 0) {
2936 			aconnector->edid = NULL;
2937 			if (aconnector->dc_link->aux_mode) {
2938 				drm_dp_cec_unset_edid(
2939 					&aconnector->dm_dp_aux.aux);
2940 			}
2941 		} else {
2942 			aconnector->edid =
2943 				(struct edid *)sink->dc_edid.raw_edid;
2944 
2945 			drm_connector_update_edid_property(connector,
2946 							   aconnector->edid);
2947 			if (aconnector->dc_link->aux_mode)
2948 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2949 						    aconnector->edid);
2950 		}
2951 
2952 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2953 		update_connector_ext_caps(aconnector);
2954 	} else {
2955 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2956 		amdgpu_dm_update_freesync_caps(connector, NULL);
2957 		drm_connector_update_edid_property(connector, NULL);
2958 		aconnector->num_modes = 0;
2959 		dc_sink_release(aconnector->dc_sink);
2960 		aconnector->dc_sink = NULL;
2961 		aconnector->edid = NULL;
2962 #ifdef CONFIG_DRM_AMD_DC_HDCP
2963 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2964 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2965 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2966 #endif
2967 	}
2968 
2969 	mutex_unlock(&dev->mode_config.mutex);
2970 
2971 	update_subconnector_property(aconnector);
2972 
2973 	if (sink)
2974 		dc_sink_release(sink);
2975 }
2976 
2977 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2978 {
2979 	struct drm_connector *connector = &aconnector->base;
2980 	struct drm_device *dev = connector->dev;
2981 	enum dc_connection_type new_connection_type = dc_connection_none;
2982 	struct amdgpu_device *adev = drm_to_adev(dev);
2983 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2984 	struct dm_crtc_state *dm_crtc_state = NULL;
2985 
2986 	if (adev->dm.disable_hpd_irq)
2987 		return;
2988 
2989 	if (dm_con_state->base.state && dm_con_state->base.crtc)
2990 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2991 					dm_con_state->base.state,
2992 					dm_con_state->base.crtc));
2993 	/*
2994 	 * In case of failure or MST no need to update connector status or notify the OS
2995 	 * since (for MST case) MST does this in its own context.
2996 	 */
2997 	mutex_lock(&aconnector->hpd_lock);
2998 
2999 #ifdef CONFIG_DRM_AMD_DC_HDCP
3000 	if (adev->dm.hdcp_workqueue) {
3001 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3002 		dm_con_state->update_hdcp = true;
3003 	}
3004 #endif
3005 	if (aconnector->fake_enable)
3006 		aconnector->fake_enable = false;
3007 
3008 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3009 		DRM_ERROR("KMS: Failed to detect connector\n");
3010 
3011 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3012 		emulated_link_detect(aconnector->dc_link);
3013 
3014 		drm_modeset_lock_all(dev);
3015 		dm_restore_drm_connector_state(dev, connector);
3016 		drm_modeset_unlock_all(dev);
3017 
3018 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3019 			drm_kms_helper_hotplug_event(dev);
3020 
3021 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3022 		if (new_connection_type == dc_connection_none &&
3023 		    aconnector->dc_link->type == dc_connection_none &&
3024 		    dm_crtc_state)
3025 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3026 
3027 		amdgpu_dm_update_connector_after_detect(aconnector);
3028 
3029 		drm_modeset_lock_all(dev);
3030 		dm_restore_drm_connector_state(dev, connector);
3031 		drm_modeset_unlock_all(dev);
3032 
3033 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3034 			drm_kms_helper_hotplug_event(dev);
3035 	}
3036 	mutex_unlock(&aconnector->hpd_lock);
3037 
3038 }
3039 
3040 static void handle_hpd_irq(void *param)
3041 {
3042 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3043 
3044 	handle_hpd_irq_helper(aconnector);
3045 
3046 }
3047 
3048 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3049 {
3050 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3051 	uint8_t dret;
3052 	bool new_irq_handled = false;
3053 	int dpcd_addr;
3054 	int dpcd_bytes_to_read;
3055 
3056 	const int max_process_count = 30;
3057 	int process_count = 0;
3058 
3059 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3060 
3061 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3062 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3063 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3064 		dpcd_addr = DP_SINK_COUNT;
3065 	} else {
3066 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3067 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3068 		dpcd_addr = DP_SINK_COUNT_ESI;
3069 	}
3070 
3071 	dret = drm_dp_dpcd_read(
3072 		&aconnector->dm_dp_aux.aux,
3073 		dpcd_addr,
3074 		esi,
3075 		dpcd_bytes_to_read);
3076 
3077 	while (dret == dpcd_bytes_to_read &&
3078 		process_count < max_process_count) {
3079 		uint8_t retry;
3080 		dret = 0;
3081 
3082 		process_count++;
3083 
3084 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3085 		/* handle HPD short pulse irq */
3086 		if (aconnector->mst_mgr.mst_state)
3087 			drm_dp_mst_hpd_irq(
3088 				&aconnector->mst_mgr,
3089 				esi,
3090 				&new_irq_handled);
3091 
3092 		if (new_irq_handled) {
3093 			/* ACK at DPCD to notify downstream */
3094 			const int ack_dpcd_bytes_to_write =
3095 				dpcd_bytes_to_read - 1;
3096 
3097 			for (retry = 0; retry < 3; retry++) {
3098 				uint8_t wret;
3099 
3100 				wret = drm_dp_dpcd_write(
3101 					&aconnector->dm_dp_aux.aux,
3102 					dpcd_addr + 1,
3103 					&esi[1],
3104 					ack_dpcd_bytes_to_write);
3105 				if (wret == ack_dpcd_bytes_to_write)
3106 					break;
3107 			}
3108 
3109 			/* check if there is new irq to be handled */
3110 			dret = drm_dp_dpcd_read(
3111 				&aconnector->dm_dp_aux.aux,
3112 				dpcd_addr,
3113 				esi,
3114 				dpcd_bytes_to_read);
3115 
3116 			new_irq_handled = false;
3117 		} else {
3118 			break;
3119 		}
3120 	}
3121 
3122 	if (process_count == max_process_count)
3123 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3124 }
3125 
3126 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3127 							union hpd_irq_data hpd_irq_data)
3128 {
3129 	struct hpd_rx_irq_offload_work *offload_work =
3130 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3131 
3132 	if (!offload_work) {
3133 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3134 		return;
3135 	}
3136 
3137 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3138 	offload_work->data = hpd_irq_data;
3139 	offload_work->offload_wq = offload_wq;
3140 
3141 	queue_work(offload_wq->wq, &offload_work->work);
3142 	DRM_DEBUG_KMS("queued hpd_rx offload work\n");
3143 }
3144 
3145 static void handle_hpd_rx_irq(void *param)
3146 {
3147 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3148 	struct drm_connector *connector = &aconnector->base;
3149 	struct drm_device *dev = connector->dev;
3150 	struct dc_link *dc_link = aconnector->dc_link;
3151 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3152 	bool result = false;
3153 	enum dc_connection_type new_connection_type = dc_connection_none;
3154 	struct amdgpu_device *adev = drm_to_adev(dev);
3155 	union hpd_irq_data hpd_irq_data;
3156 	bool link_loss = false;
3157 	bool has_left_work = false;
3158 	int idx = aconnector->base.index;
3159 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3160 
3161 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3162 
3163 	if (adev->dm.disable_hpd_irq)
3164 		return;
3165 
3166 	/*
3167 	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
3168 	 * conflict; after an i2c helper is implemented, this mutex should be
3169 	 * retired.
3170 	 */
3171 	mutex_lock(&aconnector->hpd_lock);
3172 
3173 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3174 						&link_loss, true, &has_left_work);
3175 
3176 	if (!has_left_work)
3177 		goto out;
3178 
3179 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3180 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3181 		goto out;
3182 	}
3183 
3184 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3185 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3186 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3187 			dm_handle_mst_sideband_msg(aconnector);
3188 			goto out;
3189 		}
3190 
3191 		if (link_loss) {
3192 			bool skip = false;
3193 
3194 			spin_lock(&offload_wq->offload_lock);
3195 			skip = offload_wq->is_handling_link_loss;
3196 
3197 			if (!skip)
3198 				offload_wq->is_handling_link_loss = true;
3199 
3200 			spin_unlock(&offload_wq->offload_lock);
3201 
3202 			if (!skip)
3203 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3204 
3205 			goto out;
3206 		}
3207 	}
3208 
3209 out:
3210 	if (result && !is_mst_root_connector) {
3211 		/* Downstream Port status changed. */
3212 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3213 			DRM_ERROR("KMS: Failed to detect connector\n");
3214 
3215 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3216 			emulated_link_detect(dc_link);
3217 
3218 			if (aconnector->fake_enable)
3219 				aconnector->fake_enable = false;
3220 
3221 			amdgpu_dm_update_connector_after_detect(aconnector);
3222 
3223 
3224 			drm_modeset_lock_all(dev);
3225 			dm_restore_drm_connector_state(dev, connector);
3226 			drm_modeset_unlock_all(dev);
3227 
3228 			drm_kms_helper_hotplug_event(dev);
3229 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3230 
3231 			if (aconnector->fake_enable)
3232 				aconnector->fake_enable = false;
3233 
3234 			amdgpu_dm_update_connector_after_detect(aconnector);
3235 
3236 
3237 			drm_modeset_lock_all(dev);
3238 			dm_restore_drm_connector_state(dev, connector);
3239 			drm_modeset_unlock_all(dev);
3240 
3241 			drm_kms_helper_hotplug_event(dev);
3242 		}
3243 	}
3244 #ifdef CONFIG_DRM_AMD_DC_HDCP
3245 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3246 		if (adev->dm.hdcp_workqueue)
3247 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3248 	}
3249 #endif
3250 
3251 	if (dc_link->type != dc_connection_mst_branch)
3252 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3253 
3254 	mutex_unlock(&aconnector->hpd_lock);
3255 }
3256 
3257 static void register_hpd_handlers(struct amdgpu_device *adev)
3258 {
3259 	struct drm_device *dev = adev_to_drm(adev);
3260 	struct drm_connector *connector;
3261 	struct amdgpu_dm_connector *aconnector;
3262 	const struct dc_link *dc_link;
3263 	struct dc_interrupt_params int_params = {0};
3264 
3265 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3266 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3267 
3268 	list_for_each_entry(connector,
3269 			&dev->mode_config.connector_list, head) {
3270 
3271 		aconnector = to_amdgpu_dm_connector(connector);
3272 		dc_link = aconnector->dc_link;
3273 
3274 		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3275 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3276 			int_params.irq_source = dc_link->irq_source_hpd;
3277 
3278 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3279 					handle_hpd_irq,
3280 					(void *) aconnector);
3281 		}
3282 
3283 		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3284 
3285 			/* Also register for DP short pulse (hpd_rx). */
3286 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3287 			int_params.irq_source = dc_link->irq_source_hpd_rx;
3288 
3289 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3290 					handle_hpd_rx_irq,
3291 					(void *) aconnector);
3292 
3293 			if (adev->dm.hpd_rx_offload_wq)
3294 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3295 					aconnector;
3296 		}
3297 	}
3298 }
3299 
3300 #if defined(CONFIG_DRM_AMD_DC_SI)
3301 /* Register IRQ sources and initialize IRQ callbacks */
3302 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3303 {
3304 	struct dc *dc = adev->dm.dc;
3305 	struct common_irq_params *c_irq_params;
3306 	struct dc_interrupt_params int_params = {0};
3307 	int r;
3308 	int i;
3309 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3310 
3311 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3312 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3313 
3314 	/*
3315 	 * Actions of amdgpu_irq_add_id():
3316 	 * 1. Register a set() function with base driver.
3317 	 *    Base driver will call set() function to enable/disable an
3318 	 *    interrupt in DC hardware.
3319 	 * 2. Register amdgpu_dm_irq_handler().
3320 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3321 	 *    coming from DC hardware.
3322 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3323 	 *    for acknowledging and handling. */
3324 
3325 	/* Use VBLANK interrupt */
3326 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3327 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3328 		if (r) {
3329 			DRM_ERROR("Failed to add crtc irq id!\n");
3330 			return r;
3331 		}
3332 
3333 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3334 		int_params.irq_source =
3335 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3336 
3337 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3338 
3339 		c_irq_params->adev = adev;
3340 		c_irq_params->irq_src = int_params.irq_source;
3341 
3342 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3343 				dm_crtc_high_irq, c_irq_params);
3344 	}
3345 
3346 	/* Use GRPH_PFLIP interrupt */
3347 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3348 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3349 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3350 		if (r) {
3351 			DRM_ERROR("Failed to add page flip irq id!\n");
3352 			return r;
3353 		}
3354 
3355 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3356 		int_params.irq_source =
3357 			dc_interrupt_to_irq_source(dc, i, 0);
3358 
3359 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3360 
3361 		c_irq_params->adev = adev;
3362 		c_irq_params->irq_src = int_params.irq_source;
3363 
3364 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3365 				dm_pflip_high_irq, c_irq_params);
3366 
3367 	}
3368 
3369 	/* HPD */
3370 	r = amdgpu_irq_add_id(adev, client_id,
3371 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3372 	if (r) {
3373 		DRM_ERROR("Failed to add hpd irq id!\n");
3374 		return r;
3375 	}
3376 
3377 	register_hpd_handlers(adev);
3378 
3379 	return 0;
3380 }
3381 #endif
3382 
3383 /* Register IRQ sources and initialize IRQ callbacks */
3384 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3385 {
3386 	struct dc *dc = adev->dm.dc;
3387 	struct common_irq_params *c_irq_params;
3388 	struct dc_interrupt_params int_params = {0};
3389 	int r;
3390 	int i;
3391 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3392 
3393 	if (adev->family >= AMDGPU_FAMILY_AI)
3394 		client_id = SOC15_IH_CLIENTID_DCE;
3395 
3396 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3397 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3398 
3399 	/*
3400 	 * Actions of amdgpu_irq_add_id():
3401 	 * 1. Register a set() function with base driver.
3402 	 *    Base driver will call set() function to enable/disable an
3403 	 *    interrupt in DC hardware.
3404 	 * 2. Register amdgpu_dm_irq_handler().
3405 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3406 	 *    coming from DC hardware.
3407 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3408 	 *    for acknowledging and handling. */
3409 
3410 	/* Use VBLANK interrupt */
3411 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3412 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3413 		if (r) {
3414 			DRM_ERROR("Failed to add crtc irq id!\n");
3415 			return r;
3416 		}
3417 
3418 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3419 		int_params.irq_source =
3420 			dc_interrupt_to_irq_source(dc, i, 0);
3421 
3422 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3423 
3424 		c_irq_params->adev = adev;
3425 		c_irq_params->irq_src = int_params.irq_source;
3426 
3427 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3428 				dm_crtc_high_irq, c_irq_params);
3429 	}
3430 
3431 	/* Use VUPDATE interrupt */
3432 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3433 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3434 		if (r) {
3435 			DRM_ERROR("Failed to add vupdate irq id!\n");
3436 			return r;
3437 		}
3438 
3439 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3440 		int_params.irq_source =
3441 			dc_interrupt_to_irq_source(dc, i, 0);
3442 
3443 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3444 
3445 		c_irq_params->adev = adev;
3446 		c_irq_params->irq_src = int_params.irq_source;
3447 
3448 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3449 				dm_vupdate_high_irq, c_irq_params);
3450 	}
3451 
3452 	/* Use GRPH_PFLIP interrupt */
3453 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3454 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3455 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3456 		if (r) {
3457 			DRM_ERROR("Failed to add page flip irq id!\n");
3458 			return r;
3459 		}
3460 
3461 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3462 		int_params.irq_source =
3463 			dc_interrupt_to_irq_source(dc, i, 0);
3464 
3465 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3466 
3467 		c_irq_params->adev = adev;
3468 		c_irq_params->irq_src = int_params.irq_source;
3469 
3470 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3471 				dm_pflip_high_irq, c_irq_params);
3472 
3473 	}
3474 
3475 	/* HPD */
3476 	r = amdgpu_irq_add_id(adev, client_id,
3477 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3478 	if (r) {
3479 		DRM_ERROR("Failed to add hpd irq id!\n");
3480 		return r;
3481 	}
3482 
3483 	register_hpd_handlers(adev);
3484 
3485 	return 0;
3486 }
3487 
3488 #if defined(CONFIG_DRM_AMD_DC_DCN)
3489 /* Register IRQ sources and initialize IRQ callbacks */
3490 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3491 {
3492 	struct dc *dc = adev->dm.dc;
3493 	struct common_irq_params *c_irq_params;
3494 	struct dc_interrupt_params int_params = {0};
3495 	int r;
3496 	int i;
3497 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3498 	static const unsigned int vrtl_int_srcid[] = {
3499 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3500 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3501 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3502 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3503 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3504 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3505 	};
3506 #endif
3507 
3508 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3509 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3510 
3511 	/*
3512 	 * Actions of amdgpu_irq_add_id():
3513 	 * 1. Register a set() function with base driver.
3514 	 *    Base driver will call set() function to enable/disable an
3515 	 *    interrupt in DC hardware.
3516 	 * 2. Register amdgpu_dm_irq_handler().
3517 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3518 	 *    coming from DC hardware.
3519 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3520 	 *    for acknowledging and handling.
3521 	 */
3522 
3523 	/* Use VSTARTUP interrupt */
3524 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3525 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3526 			i++) {
3527 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3528 
3529 		if (r) {
3530 			DRM_ERROR("Failed to add crtc irq id!\n");
3531 			return r;
3532 		}
3533 
3534 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3535 		int_params.irq_source =
3536 			dc_interrupt_to_irq_source(dc, i, 0);
3537 
3538 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3539 
3540 		c_irq_params->adev = adev;
3541 		c_irq_params->irq_src = int_params.irq_source;
3542 
3543 		amdgpu_dm_irq_register_interrupt(
3544 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3545 	}
3546 
3547 	/* Use otg vertical line interrupt */
3548 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3549 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3550 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3551 				vrtl_int_srcid[i], &adev->vline0_irq);
3552 
3553 		if (r) {
3554 			DRM_ERROR("Failed to add vline0 irq id!\n");
3555 			return r;
3556 		}
3557 
3558 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3559 		int_params.irq_source =
3560 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3561 
3562 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3563 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3564 			break;
3565 		}
3566 
3567 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3568 					- DC_IRQ_SOURCE_DC1_VLINE0];
3569 
3570 		c_irq_params->adev = adev;
3571 		c_irq_params->irq_src = int_params.irq_source;
3572 
3573 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3574 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3575 	}
3576 #endif
3577 
3578 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3579 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3580 	 * to trigger at end of each vblank, regardless of state of the lock,
3581 	 * matching DCE behaviour.
3582 	 */
3583 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3584 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3585 	     i++) {
3586 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3587 
3588 		if (r) {
3589 			DRM_ERROR("Failed to add vupdate irq id!\n");
3590 			return r;
3591 		}
3592 
3593 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3594 		int_params.irq_source =
3595 			dc_interrupt_to_irq_source(dc, i, 0);
3596 
3597 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3598 
3599 		c_irq_params->adev = adev;
3600 		c_irq_params->irq_src = int_params.irq_source;
3601 
3602 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3603 				dm_vupdate_high_irq, c_irq_params);
3604 	}
3605 
3606 	/* Use GRPH_PFLIP interrupt */
3607 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3608 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3609 			i++) {
3610 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3611 		if (r) {
3612 			DRM_ERROR("Failed to add page flip irq id!\n");
3613 			return r;
3614 		}
3615 
3616 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3617 		int_params.irq_source =
3618 			dc_interrupt_to_irq_source(dc, i, 0);
3619 
3620 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3621 
3622 		c_irq_params->adev = adev;
3623 		c_irq_params->irq_src = int_params.irq_source;
3624 
3625 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3626 				dm_pflip_high_irq, c_irq_params);
3627 
3628 	}
3629 
3630 	/* HPD */
3631 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3632 			&adev->hpd_irq);
3633 	if (r) {
3634 		DRM_ERROR("Failed to add hpd irq id!\n");
3635 		return r;
3636 	}
3637 
3638 	register_hpd_handlers(adev);
3639 
3640 	return 0;
3641 }
3642 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3643 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3644 {
3645 	struct dc *dc = adev->dm.dc;
3646 	struct common_irq_params *c_irq_params;
3647 	struct dc_interrupt_params int_params = {0};
3648 	int r, i;
3649 
3650 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3651 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3652 
3653 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3654 			&adev->dmub_outbox_irq);
3655 	if (r) {
3656 		DRM_ERROR("Failed to add outbox irq id!\n");
3657 		return r;
3658 	}
3659 
3660 	if (dc->ctx->dmub_srv) {
3661 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3662 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3663 		int_params.irq_source =
3664 		dc_interrupt_to_irq_source(dc, i, 0);
3665 
3666 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3667 
3668 		c_irq_params->adev = adev;
3669 		c_irq_params->irq_src = int_params.irq_source;
3670 
3671 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3672 				dm_dmub_outbox1_low_irq, c_irq_params);
3673 	}
3674 
3675 	return 0;
3676 }
3677 #endif
3678 
3679 /*
3680  * Acquires the lock for the atomic state object and returns
3681  * the new atomic state.
3682  *
3683  * This should only be called during atomic check.
3684  */
3685 static int dm_atomic_get_state(struct drm_atomic_state *state,
3686 			       struct dm_atomic_state **dm_state)
3687 {
3688 	struct drm_device *dev = state->dev;
3689 	struct amdgpu_device *adev = drm_to_adev(dev);
3690 	struct amdgpu_display_manager *dm = &adev->dm;
3691 	struct drm_private_state *priv_state;
3692 
3693 	if (*dm_state)
3694 		return 0;
3695 
3696 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3697 	if (IS_ERR(priv_state))
3698 		return PTR_ERR(priv_state);
3699 
3700 	*dm_state = to_dm_atomic_state(priv_state);
3701 
3702 	return 0;
3703 }
3704 
3705 static struct dm_atomic_state *
3706 dm_atomic_get_new_state(struct drm_atomic_state *state)
3707 {
3708 	struct drm_device *dev = state->dev;
3709 	struct amdgpu_device *adev = drm_to_adev(dev);
3710 	struct amdgpu_display_manager *dm = &adev->dm;
3711 	struct drm_private_obj *obj;
3712 	struct drm_private_state *new_obj_state;
3713 	int i;
3714 
3715 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3716 		if (obj->funcs == dm->atomic_obj.funcs)
3717 			return to_dm_atomic_state(new_obj_state);
3718 	}
3719 
3720 	return NULL;
3721 }
3722 
3723 static struct drm_private_state *
3724 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3725 {
3726 	struct dm_atomic_state *old_state, *new_state;
3727 
3728 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3729 	if (!new_state)
3730 		return NULL;
3731 
3732 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3733 
3734 	old_state = to_dm_atomic_state(obj->state);
3735 
3736 	if (old_state && old_state->context)
3737 		new_state->context = dc_copy_state(old_state->context);
3738 
3739 	if (!new_state->context) {
3740 		kfree(new_state);
3741 		return NULL;
3742 	}
3743 
3744 	return &new_state->base;
3745 }
3746 
3747 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3748 				    struct drm_private_state *state)
3749 {
3750 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3751 
3752 	if (dm_state && dm_state->context)
3753 		dc_release_state(dm_state->context);
3754 
3755 	kfree(dm_state);
3756 }
3757 
3758 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3759 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3760 	.atomic_destroy_state = dm_atomic_destroy_state,
3761 };
3762 
3763 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3764 {
3765 	struct dm_atomic_state *state;
3766 	int r;
3767 
3768 	adev->mode_info.mode_config_initialized = true;
3769 
3770 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3771 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3772 
3773 	adev_to_drm(adev)->mode_config.max_width = 16384;
3774 	adev_to_drm(adev)->mode_config.max_height = 16384;
3775 
3776 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3777 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3778 	/* indicates support for immediate flip */
3779 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3780 
3781 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3782 
3783 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3784 	if (!state)
3785 		return -ENOMEM;
3786 
3787 	state->context = dc_create_state(adev->dm.dc);
3788 	if (!state->context) {
3789 		kfree(state);
3790 		return -ENOMEM;
3791 	}
3792 
3793 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3794 
3795 	drm_atomic_private_obj_init(adev_to_drm(adev),
3796 				    &adev->dm.atomic_obj,
3797 				    &state->base,
3798 				    &dm_atomic_state_funcs);
3799 
3800 	r = amdgpu_display_modeset_create_props(adev);
3801 	if (r) {
3802 		dc_release_state(state->context);
3803 		kfree(state);
3804 		return r;
3805 	}
3806 
3807 	r = amdgpu_dm_audio_init(adev);
3808 	if (r) {
3809 		dc_release_state(state->context);
3810 		kfree(state);
3811 		return r;
3812 	}
3813 
3814 	return 0;
3815 }
3816 
3817 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3818 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3819 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3820 
3821 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3822 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3823 
3824 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3825 					    int bl_idx)
3826 {
3827 #if defined(CONFIG_ACPI)
3828 	struct amdgpu_dm_backlight_caps caps;
3829 
3830 	memset(&caps, 0, sizeof(caps));
3831 
3832 	if (dm->backlight_caps[bl_idx].caps_valid)
3833 		return;
3834 
3835 	amdgpu_acpi_get_backlight_caps(&caps);
3836 	if (caps.caps_valid) {
3837 		dm->backlight_caps[bl_idx].caps_valid = true;
3838 		if (caps.aux_support)
3839 			return;
3840 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3841 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3842 	} else {
3843 		dm->backlight_caps[bl_idx].min_input_signal =
3844 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3845 		dm->backlight_caps[bl_idx].max_input_signal =
3846 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3847 	}
3848 #else
3849 	if (dm->backlight_caps[bl_idx].aux_support)
3850 		return;
3851 
3852 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3853 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3854 #endif
3855 }
3856 
3857 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3858 				unsigned *min, unsigned *max)
3859 {
3860 	if (!caps)
3861 		return 0;
3862 
3863 	if (caps->aux_support) {
3864 		// Firmware limits are in nits, DC API wants millinits.
3865 		*max = 1000 * caps->aux_max_input_signal;
3866 		*min = 1000 * caps->aux_min_input_signal;
3867 	} else {
3868 		// Firmware limits are 8-bit, PWM control is 16-bit.
3869 		*max = 0x101 * caps->max_input_signal;
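		// Multiplying an 8-bit value by 0x101 replicates it into both bytes
		// of the 16-bit result, e.g. 0xFF * 0x101 = 0xFFFF, so the full PWM
		// range stays reachable.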
3870 		*min = 0x101 * caps->min_input_signal;
3871 	}
3872 	return 1;
3873 }
3874 
3875 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3876 					uint32_t brightness)
3877 {
3878 	unsigned min, max;
3879 
3880 	if (!get_brightness_range(caps, &min, &max))
3881 		return brightness;
3882 
3883 	// Rescale 0..255 to min..max
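	// e.g. brightness = 128 with min = 0 and max = 0xFFFF gives
	// 0 + DIV_ROUND_CLOSEST(0xFFFF * 128, 255) = 0x8080.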
3884 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3885 				       AMDGPU_MAX_BL_LEVEL);
3886 }
3887 
3888 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3889 				      uint32_t brightness)
3890 {
3891 	unsigned min, max;
3892 
3893 	if (!get_brightness_range(caps, &min, &max))
3894 		return brightness;
3895 
3896 	if (brightness < min)
3897 		return 0;
3898 	// Rescale min..max to 0..255
3899 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3900 				 max - min);
3901 }
3902 
3903 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3904 					 int bl_idx,
3905 					 u32 user_brightness)
3906 {
3907 	struct amdgpu_dm_backlight_caps caps;
3908 	struct dc_link *link;
3909 	u32 brightness;
3910 	bool rc;
3911 
3912 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3913 	caps = dm->backlight_caps[bl_idx];
3914 
3915 	dm->brightness[bl_idx] = user_brightness;
3916 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3917 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3918 
3919 	/* Change brightness based on AUX property */
3920 	if (caps.aux_support) {
3921 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3922 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3923 		if (!rc)
3924 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3925 	} else {
3926 		rc = dc_link_set_backlight_level(link, brightness, 0);
3927 		if (!rc)
3928 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3929 	}
3930 
3931 	return rc ? 0 : 1;
3932 }
3933 
3934 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3935 {
3936 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3937 	int i;
3938 
3939 	for (i = 0; i < dm->num_of_edps; i++) {
3940 		if (bd == dm->backlight_dev[i])
3941 			break;
3942 	}
3943 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3944 		i = 0;
3945 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3946 
3947 	return 0;
3948 }
3949 
3950 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3951 					 int bl_idx)
3952 {
3953 	struct amdgpu_dm_backlight_caps caps;
3954 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3955 
3956 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3957 	caps = dm->backlight_caps[bl_idx];
3958 
3959 	if (caps.aux_support) {
3960 		u32 avg, peak;
3961 		bool rc;
3962 
3963 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3964 		if (!rc)
3965 			return dm->brightness[bl_idx];
3966 		return convert_brightness_to_user(&caps, avg);
3967 	} else {
3968 		int ret = dc_link_get_backlight_level(link);
3969 
3970 		if (ret == DC_ERROR_UNEXPECTED)
3971 			return dm->brightness[bl_idx];
3972 		return convert_brightness_to_user(&caps, ret);
3973 	}
3974 }
3975 
3976 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3977 {
3978 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3979 	int i;
3980 
3981 	for (i = 0; i < dm->num_of_edps; i++) {
3982 		if (bd == dm->backlight_dev[i])
3983 			break;
3984 	}
3985 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3986 		i = 0;
3987 	return amdgpu_dm_backlight_get_level(dm, i);
3988 }
3989 
3990 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3991 	.options = BL_CORE_SUSPENDRESUME,
3992 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3993 	.update_status	= amdgpu_dm_backlight_update_status,
3994 };
3995 
3996 static void
3997 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3998 {
3999 	char bl_name[16];
4000 	struct backlight_properties props = { 0 };
4001 
4002 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4003 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4004 
4005 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4006 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4007 	props.type = BACKLIGHT_RAW;
4008 
4009 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4010 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4011 
4012 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4013 								       adev_to_drm(dm->adev)->dev,
4014 								       dm,
4015 								       &amdgpu_dm_backlight_ops,
4016 								       &props);
4017 
4018 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4019 		DRM_ERROR("DM: Backlight registration failed!\n");
4020 	else
4021 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4022 }
4023 #endif
4024 
4025 static int initialize_plane(struct amdgpu_display_manager *dm,
4026 			    struct amdgpu_mode_info *mode_info, int plane_id,
4027 			    enum drm_plane_type plane_type,
4028 			    const struct dc_plane_cap *plane_cap)
4029 {
4030 	struct drm_plane *plane;
4031 	unsigned long possible_crtcs;
4032 	int ret = 0;
4033 
4034 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4035 	if (!plane) {
4036 		DRM_ERROR("KMS: Failed to allocate plane\n");
4037 		return -ENOMEM;
4038 	}
4039 	plane->type = plane_type;
4040 
4041 	/*
4042 	 * HACK: IGT tests expect that the primary plane for a CRTC
4043 	 * can only have one possible CRTC. Only expose support for
4044 	 * any CRTC to planes that won't be used as a primary plane
4045 	 * for a CRTC - i.e. overlay or underlay planes.
4046 	 */
4047 	possible_crtcs = 1 << plane_id;
4048 	if (plane_id >= dm->dc->caps.max_streams)
4049 		possible_crtcs = 0xff;
4050 
4051 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4052 
4053 	if (ret) {
4054 		DRM_ERROR("KMS: Failed to initialize plane\n");
4055 		kfree(plane);
4056 		return ret;
4057 	}
4058 
4059 	if (mode_info)
4060 		mode_info->planes[plane_id] = plane;
4061 
4062 	return ret;
4063 }
4064 
4065 
4066 static void register_backlight_device(struct amdgpu_display_manager *dm,
4067 				      struct dc_link *link)
4068 {
4069 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4070 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4071 
4072 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4073 	    link->type != dc_connection_none) {
4074 		/*
4075 		 * Even if registration fails, we should continue with
4076 		 * DM initialization because not having backlight control
4077 		 * is better than a black screen.
4078 		 */
4079 		if (!dm->backlight_dev[dm->num_of_edps])
4080 			amdgpu_dm_register_backlight_device(dm);
4081 
4082 		if (dm->backlight_dev[dm->num_of_edps]) {
4083 			dm->backlight_link[dm->num_of_edps] = link;
4084 			dm->num_of_edps++;
4085 		}
4086 	}
4087 #endif
4088 }
4089 
4090 
4091 /*
4092  * In this architecture, the association
4093  * connector -> encoder -> crtc
4094  * is not really required. The crtc and connector will hold the
4095  * display_index as an abstraction to use with the DAL component.
4096  *
4097  * Returns 0 on success
4098  */
4099 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4100 {
4101 	struct amdgpu_display_manager *dm = &adev->dm;
4102 	int32_t i;
4103 	struct amdgpu_dm_connector *aconnector = NULL;
4104 	struct amdgpu_encoder *aencoder = NULL;
4105 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4106 	uint32_t link_cnt;
4107 	int32_t primary_planes;
4108 	enum dc_connection_type new_connection_type = dc_connection_none;
4109 	const struct dc_plane_cap *plane;
4110 	bool psr_feature_enabled = false;
4111 
4112 	dm->display_indexes_num = dm->dc->caps.max_streams;
4113 	/* Update the actual number of crtcs in use */
4114 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4115 
4116 	link_cnt = dm->dc->caps.max_links;
4117 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4118 		DRM_ERROR("DM: Failed to initialize mode config\n");
4119 		return -EINVAL;
4120 	}
4121 
4122 	/* There is one primary plane per CRTC */
4123 	primary_planes = dm->dc->caps.max_streams;
4124 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4125 
4126 	/*
4127 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4128 	 * Order is reversed to match iteration order in atomic check.
4129 	 */
4130 	for (i = (primary_planes - 1); i >= 0; i--) {
4131 		plane = &dm->dc->caps.planes[i];
4132 
4133 		if (initialize_plane(dm, mode_info, i,
4134 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4135 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4136 			goto fail;
4137 		}
4138 	}
4139 
4140 	/*
4141 	 * Initialize overlay planes, index starting after primary planes.
4142 	 * These planes have a higher DRM index than the primary planes since
4143 	 * they should be considered as having a higher z-order.
4144 	 * Order is reversed to match iteration order in atomic check.
4145 	 *
4146 	 * Only support DCN for now, and only expose one so we don't encourage
4147 	 * userspace to use up all the pipes.
4148 	 */
4149 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4150 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4151 
4152 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4153 			continue;
4154 
4155 		if (!plane->blends_with_above || !plane->blends_with_below)
4156 			continue;
4157 
4158 		if (!plane->pixel_format_support.argb8888)
4159 			continue;
4160 
4161 		if (initialize_plane(dm, NULL, primary_planes + i,
4162 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4163 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4164 			goto fail;
4165 		}
4166 
4167 		/* Only create one overlay plane. */
4168 		break;
4169 	}
4170 
4171 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4172 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4173 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4174 			goto fail;
4175 		}
4176 
4177 #if defined(CONFIG_DRM_AMD_DC_DCN)
4178 	/* Use Outbox interrupt */
4179 	switch (adev->ip_versions[DCE_HWIP][0]) {
4180 	case IP_VERSION(3, 0, 0):
4181 	case IP_VERSION(3, 1, 2):
4182 	case IP_VERSION(3, 1, 3):
4183 	case IP_VERSION(2, 1, 0):
4184 		if (register_outbox_irq_handlers(dm->adev)) {
4185 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4186 			goto fail;
4187 		}
4188 		break;
4189 	default:
4190 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4191 			      adev->ip_versions[DCE_HWIP][0]);
4192 	}
4193 
4194 	/* Determine whether to enable PSR support by default. */
4195 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4196 		switch (adev->ip_versions[DCE_HWIP][0]) {
4197 		case IP_VERSION(3, 1, 2):
4198 		case IP_VERSION(3, 1, 3):
4199 			psr_feature_enabled = true;
4200 			break;
4201 		default:
4202 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4203 			break;
4204 		}
4205 	}
4206 #endif
4207 
4208 	/* loops over all connectors on the board */
4209 	for (i = 0; i < link_cnt; i++) {
4210 		struct dc_link *link = NULL;
4211 
4212 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4213 			DRM_ERROR(
4214 				"KMS: Cannot support more than %d display indexes\n",
4215 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4216 			continue;
4217 		}
4218 
4219 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4220 		if (!aconnector)
4221 			goto fail;
4222 
4223 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4224 		if (!aencoder)
4225 			goto fail;
4226 
4227 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4228 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4229 			goto fail;
4230 		}
4231 
4232 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4233 			DRM_ERROR("KMS: Failed to initialize connector\n");
4234 			goto fail;
4235 		}
4236 
4237 		link = dc_get_link_at_index(dm->dc, i);
4238 
4239 		if (!dc_link_detect_sink(link, &new_connection_type))
4240 			DRM_ERROR("KMS: Failed to detect connector\n");
4241 
4242 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4243 			emulated_link_detect(link);
4244 			amdgpu_dm_update_connector_after_detect(aconnector);
4245 
4246 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4247 			amdgpu_dm_update_connector_after_detect(aconnector);
4248 			register_backlight_device(dm, link);
4249 			if (dm->num_of_edps)
4250 				update_connector_ext_caps(aconnector);
4251 			if (psr_feature_enabled)
4252 				amdgpu_dm_set_psr_caps(link);
4253 		}
4254 
4255 
4256 	}
4257 
4258 	/* Software is initialized. Now we can register interrupt handlers. */
4259 	switch (adev->asic_type) {
4260 #if defined(CONFIG_DRM_AMD_DC_SI)
4261 	case CHIP_TAHITI:
4262 	case CHIP_PITCAIRN:
4263 	case CHIP_VERDE:
4264 	case CHIP_OLAND:
4265 		if (dce60_register_irq_handlers(dm->adev)) {
4266 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4267 			goto fail;
4268 		}
4269 		break;
4270 #endif
4271 	case CHIP_BONAIRE:
4272 	case CHIP_HAWAII:
4273 	case CHIP_KAVERI:
4274 	case CHIP_KABINI:
4275 	case CHIP_MULLINS:
4276 	case CHIP_TONGA:
4277 	case CHIP_FIJI:
4278 	case CHIP_CARRIZO:
4279 	case CHIP_STONEY:
4280 	case CHIP_POLARIS11:
4281 	case CHIP_POLARIS10:
4282 	case CHIP_POLARIS12:
4283 	case CHIP_VEGAM:
4284 	case CHIP_VEGA10:
4285 	case CHIP_VEGA12:
4286 	case CHIP_VEGA20:
4287 		if (dce110_register_irq_handlers(dm->adev)) {
4288 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4289 			goto fail;
4290 		}
4291 		break;
4292 	default:
4293 #if defined(CONFIG_DRM_AMD_DC_DCN)
4294 		switch (adev->ip_versions[DCE_HWIP][0]) {
4295 		case IP_VERSION(1, 0, 0):
4296 		case IP_VERSION(1, 0, 1):
4297 		case IP_VERSION(2, 0, 2):
4298 		case IP_VERSION(2, 0, 3):
4299 		case IP_VERSION(2, 0, 0):
4300 		case IP_VERSION(2, 1, 0):
4301 		case IP_VERSION(3, 0, 0):
4302 		case IP_VERSION(3, 0, 2):
4303 		case IP_VERSION(3, 0, 3):
4304 		case IP_VERSION(3, 0, 1):
4305 		case IP_VERSION(3, 1, 2):
4306 		case IP_VERSION(3, 1, 3):
4307 			if (dcn10_register_irq_handlers(dm->adev)) {
4308 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4309 				goto fail;
4310 			}
4311 			break;
4312 		default:
4313 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4314 					adev->ip_versions[DCE_HWIP][0]);
4315 			goto fail;
4316 		}
4317 #endif
4318 		break;
4319 	}
4320 
4321 	return 0;
4322 fail:
4323 	kfree(aencoder);
4324 	kfree(aconnector);
4325 
4326 	return -EINVAL;
4327 }
4328 
4329 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4330 {
4331 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4332 	return;
4333 }
4334 
4335 /******************************************************************************
4336  * amdgpu_display_funcs functions
4337  *****************************************************************************/
4338 
4339 /*
4340  * dm_bandwidth_update - program display watermarks
4341  *
4342  * @adev: amdgpu_device pointer
4343  *
4344  * Calculate and program the display watermarks and line buffer allocation.
4345  */
4346 static void dm_bandwidth_update(struct amdgpu_device *adev)
4347 {
4348 	/* TODO: implement later */
4349 }
4350 
4351 static const struct amdgpu_display_funcs dm_display_funcs = {
4352 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4353 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4354 	.backlight_set_level = NULL, /* never called for DC */
4355 	.backlight_get_level = NULL, /* never called for DC */
4356 	.hpd_sense = NULL,/* called unconditionally */
4357 	.hpd_set_polarity = NULL, /* called unconditionally */
4358 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4359 	.page_flip_get_scanoutpos =
4360 		dm_crtc_get_scanoutpos,/* called unconditionally */
4361 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4362 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4363 };
4364 
4365 #if defined(CONFIG_DEBUG_KERNEL_DC)
4366 
4367 static ssize_t s3_debug_store(struct device *device,
4368 			      struct device_attribute *attr,
4369 			      const char *buf,
4370 			      size_t count)
4371 {
4372 	int ret;
4373 	int s3_state;
4374 	struct drm_device *drm_dev = dev_get_drvdata(device);
4375 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4376 
4377 	ret = kstrtoint(buf, 0, &s3_state);
4378 
4379 	if (ret == 0) {
4380 		if (s3_state) {
4381 			dm_resume(adev);
4382 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4383 		} else
4384 			dm_suspend(adev);
4385 	}
4386 
4387 	return ret == 0 ? count : 0;
4388 }
4389 
4390 DEVICE_ATTR_WO(s3_debug);
4391 
4392 #endif
4393 
4394 static int dm_early_init(void *handle)
4395 {
4396 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4397 
4398 	switch (adev->asic_type) {
4399 #if defined(CONFIG_DRM_AMD_DC_SI)
4400 	case CHIP_TAHITI:
4401 	case CHIP_PITCAIRN:
4402 	case CHIP_VERDE:
4403 		adev->mode_info.num_crtc = 6;
4404 		adev->mode_info.num_hpd = 6;
4405 		adev->mode_info.num_dig = 6;
4406 		break;
4407 	case CHIP_OLAND:
4408 		adev->mode_info.num_crtc = 2;
4409 		adev->mode_info.num_hpd = 2;
4410 		adev->mode_info.num_dig = 2;
4411 		break;
4412 #endif
4413 	case CHIP_BONAIRE:
4414 	case CHIP_HAWAII:
4415 		adev->mode_info.num_crtc = 6;
4416 		adev->mode_info.num_hpd = 6;
4417 		adev->mode_info.num_dig = 6;
4418 		break;
4419 	case CHIP_KAVERI:
4420 		adev->mode_info.num_crtc = 4;
4421 		adev->mode_info.num_hpd = 6;
4422 		adev->mode_info.num_dig = 7;
4423 		break;
4424 	case CHIP_KABINI:
4425 	case CHIP_MULLINS:
4426 		adev->mode_info.num_crtc = 2;
4427 		adev->mode_info.num_hpd = 6;
4428 		adev->mode_info.num_dig = 6;
4429 		break;
4430 	case CHIP_FIJI:
4431 	case CHIP_TONGA:
4432 		adev->mode_info.num_crtc = 6;
4433 		adev->mode_info.num_hpd = 6;
4434 		adev->mode_info.num_dig = 7;
4435 		break;
4436 	case CHIP_CARRIZO:
4437 		adev->mode_info.num_crtc = 3;
4438 		adev->mode_info.num_hpd = 6;
4439 		adev->mode_info.num_dig = 9;
4440 		break;
4441 	case CHIP_STONEY:
4442 		adev->mode_info.num_crtc = 2;
4443 		adev->mode_info.num_hpd = 6;
4444 		adev->mode_info.num_dig = 9;
4445 		break;
4446 	case CHIP_POLARIS11:
4447 	case CHIP_POLARIS12:
4448 		adev->mode_info.num_crtc = 5;
4449 		adev->mode_info.num_hpd = 5;
4450 		adev->mode_info.num_dig = 5;
4451 		break;
4452 	case CHIP_POLARIS10:
4453 	case CHIP_VEGAM:
4454 		adev->mode_info.num_crtc = 6;
4455 		adev->mode_info.num_hpd = 6;
4456 		adev->mode_info.num_dig = 6;
4457 		break;
4458 	case CHIP_VEGA10:
4459 	case CHIP_VEGA12:
4460 	case CHIP_VEGA20:
4461 		adev->mode_info.num_crtc = 6;
4462 		adev->mode_info.num_hpd = 6;
4463 		adev->mode_info.num_dig = 6;
4464 		break;
4465 	default:
4466 #if defined(CONFIG_DRM_AMD_DC_DCN)
4467 		switch (adev->ip_versions[DCE_HWIP][0]) {
4468 		case IP_VERSION(2, 0, 2):
4469 		case IP_VERSION(3, 0, 0):
4470 			adev->mode_info.num_crtc = 6;
4471 			adev->mode_info.num_hpd = 6;
4472 			adev->mode_info.num_dig = 6;
4473 			break;
4474 		case IP_VERSION(2, 0, 0):
4475 		case IP_VERSION(3, 0, 2):
4476 			adev->mode_info.num_crtc = 5;
4477 			adev->mode_info.num_hpd = 5;
4478 			adev->mode_info.num_dig = 5;
4479 			break;
4480 		case IP_VERSION(2, 0, 3):
4481 		case IP_VERSION(3, 0, 3):
4482 			adev->mode_info.num_crtc = 2;
4483 			adev->mode_info.num_hpd = 2;
4484 			adev->mode_info.num_dig = 2;
4485 			break;
4486 		case IP_VERSION(1, 0, 0):
4487 		case IP_VERSION(1, 0, 1):
4488 		case IP_VERSION(3, 0, 1):
4489 		case IP_VERSION(2, 1, 0):
4490 		case IP_VERSION(3, 1, 2):
4491 		case IP_VERSION(3, 1, 3):
4492 			adev->mode_info.num_crtc = 4;
4493 			adev->mode_info.num_hpd = 4;
4494 			adev->mode_info.num_dig = 4;
4495 			break;
4496 		default:
4497 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4498 					adev->ip_versions[DCE_HWIP][0]);
4499 			return -EINVAL;
4500 		}
4501 #endif
4502 		break;
4503 	}
4504 
4505 	amdgpu_dm_set_irq_funcs(adev);
4506 
4507 	if (adev->mode_info.funcs == NULL)
4508 		adev->mode_info.funcs = &dm_display_funcs;
4509 
4510 	/*
4511 	 * Note: Do NOT change adev->audio_endpt_rreg and
4512 	 * adev->audio_endpt_wreg because they are initialised in
4513 	 * amdgpu_device_init()
4514 	 */
4515 #if defined(CONFIG_DEBUG_KERNEL_DC)
4516 	device_create_file(
4517 		adev_to_drm(adev)->dev,
4518 		&dev_attr_s3_debug);
4519 #endif
4520 
4521 	return 0;
4522 }
4523 
4524 static bool modeset_required(struct drm_crtc_state *crtc_state,
4525 			     struct dc_stream_state *new_stream,
4526 			     struct dc_stream_state *old_stream)
4527 {
4528 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4529 }
4530 
4531 static bool modereset_required(struct drm_crtc_state *crtc_state)
4532 {
4533 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4534 }
4535 
4536 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4537 {
4538 	drm_encoder_cleanup(encoder);
4539 	kfree(encoder);
4540 }
4541 
4542 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4543 	.destroy = amdgpu_dm_encoder_destroy,
4544 };
4545 
4546 
4547 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4548 					 struct drm_framebuffer *fb,
4549 					 int *min_downscale, int *max_upscale)
4550 {
4551 	struct amdgpu_device *adev = drm_to_adev(dev);
4552 	struct dc *dc = adev->dm.dc;
4553 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4554 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4555 
4556 	switch (fb->format->format) {
4557 	case DRM_FORMAT_P010:
4558 	case DRM_FORMAT_NV12:
4559 	case DRM_FORMAT_NV21:
4560 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4561 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4562 		break;
4563 
4564 	case DRM_FORMAT_XRGB16161616F:
4565 	case DRM_FORMAT_ARGB16161616F:
4566 	case DRM_FORMAT_XBGR16161616F:
4567 	case DRM_FORMAT_ABGR16161616F:
4568 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4569 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4570 		break;
4571 
4572 	default:
4573 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4574 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4575 		break;
4576 	}
4577 
4578 	/*
4579 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use
4580 	 * a scaling factor of 1.0 == 1000 units.
4581 	 */
4582 	if (*max_upscale == 1)
4583 		*max_upscale = 1000;
4584 
4585 	if (*min_downscale == 1)
4586 		*min_downscale = 1000;
4587 }
4588 
4589 
4590 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4591 				const struct drm_plane_state *state,
4592 				struct dc_scaling_info *scaling_info)
4593 {
4594 	int scale_w, scale_h, min_downscale, max_upscale;
4595 
4596 	memset(scaling_info, 0, sizeof(*scaling_info));
4597 
4598 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4599 	scaling_info->src_rect.x = state->src_x >> 16;
4600 	scaling_info->src_rect.y = state->src_y >> 16;
4601 
4602 	/*
4603 	 * For reasons we don't (yet) fully understand, a non-zero
4604 	 * src_y coordinate into an NV12 buffer can cause a
4605 	 * system hang on DCN1x.
4606 	 * To avoid hangs (and maybe to be overly cautious),
4607 	 * let's reject both non-zero src_x and src_y.
4608 	 *
4609 	 * We currently know of only one use-case to reproduce a
4610 	 * scenario with non-zero src_x and src_y for NV12, which
4611 	 * is to gesture the YouTube Android app into full screen
4612 	 * on ChromeOS.
4613 	 */
4614 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4615 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4616 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4617 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4618 		return -EINVAL;
4619 
4620 	scaling_info->src_rect.width = state->src_w >> 16;
4621 	if (scaling_info->src_rect.width == 0)
4622 		return -EINVAL;
4623 
4624 	scaling_info->src_rect.height = state->src_h >> 16;
4625 	if (scaling_info->src_rect.height == 0)
4626 		return -EINVAL;
4627 
4628 	scaling_info->dst_rect.x = state->crtc_x;
4629 	scaling_info->dst_rect.y = state->crtc_y;
4630 
4631 	if (state->crtc_w == 0)
4632 		return -EINVAL;
4633 
4634 	scaling_info->dst_rect.width = state->crtc_w;
4635 
4636 	if (state->crtc_h == 0)
4637 		return -EINVAL;
4638 
4639 	scaling_info->dst_rect.height = state->crtc_h;
4640 
4641 	/* DRM doesn't specify clipping on destination output. */
4642 	scaling_info->clip_rect = scaling_info->dst_rect;
4643 
4644 	/* Validate scaling per-format with DC plane caps */
4645 	if (state->plane && state->plane->dev && state->fb) {
4646 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4647 					     &min_downscale, &max_upscale);
4648 	} else {
4649 		min_downscale = 250;
4650 		max_upscale = 16000;
4651 	}
4652 
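	/*
	 * Scale factors are in units of 1/1000th, matching the plane caps
	 * above: e.g. a 1920-wide source shown in a 960-wide dst rect gives
	 * scale_w = 960 * 1000 / 1920 = 500, a 2:1 downscale, which must lie
	 * within [min_downscale, max_upscale].
	 */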
4653 	scale_w = scaling_info->dst_rect.width * 1000 /
4654 		  scaling_info->src_rect.width;
4655 
4656 	if (scale_w < min_downscale || scale_w > max_upscale)
4657 		return -EINVAL;
4658 
4659 	scale_h = scaling_info->dst_rect.height * 1000 /
4660 		  scaling_info->src_rect.height;
4661 
4662 	if (scale_h < min_downscale || scale_h > max_upscale)
4663 		return -EINVAL;
4664 
4665 	/*
4666 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4667 	 * assume reasonable defaults based on the format.
4668 	 */
4669 
4670 	return 0;
4671 }
4672 
4673 static void
4674 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4675 				 uint64_t tiling_flags)
4676 {
4677 	/* Fill GFX8 params */
4678 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4679 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4680 
4681 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4682 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4683 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4684 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4685 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4686 
4687 		/* XXX fix me for VI */
4688 		tiling_info->gfx8.num_banks = num_banks;
4689 		tiling_info->gfx8.array_mode =
4690 				DC_ARRAY_2D_TILED_THIN1;
4691 		tiling_info->gfx8.tile_split = tile_split;
4692 		tiling_info->gfx8.bank_width = bankw;
4693 		tiling_info->gfx8.bank_height = bankh;
4694 		tiling_info->gfx8.tile_aspect = mtaspect;
4695 		tiling_info->gfx8.tile_mode =
4696 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4697 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4698 			== DC_ARRAY_1D_TILED_THIN1) {
4699 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4700 	}
4701 
4702 	tiling_info->gfx8.pipe_config =
4703 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4704 }
4705 
4706 static void
4707 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4708 				  union dc_tiling_info *tiling_info)
4709 {
4710 	tiling_info->gfx9.num_pipes =
4711 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4712 	tiling_info->gfx9.num_banks =
4713 		adev->gfx.config.gb_addr_config_fields.num_banks;
4714 	tiling_info->gfx9.pipe_interleave =
4715 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4716 	tiling_info->gfx9.num_shader_engines =
4717 		adev->gfx.config.gb_addr_config_fields.num_se;
4718 	tiling_info->gfx9.max_compressed_frags =
4719 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4720 	tiling_info->gfx9.num_rb_per_se =
4721 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4722 	tiling_info->gfx9.shaderEnable = 1;
4723 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4724 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4725 }
4726 
4727 static int
4728 validate_dcc(struct amdgpu_device *adev,
4729 	     const enum surface_pixel_format format,
4730 	     const enum dc_rotation_angle rotation,
4731 	     const union dc_tiling_info *tiling_info,
4732 	     const struct dc_plane_dcc_param *dcc,
4733 	     const struct dc_plane_address *address,
4734 	     const struct plane_size *plane_size)
4735 {
4736 	struct dc *dc = adev->dm.dc;
4737 	struct dc_dcc_surface_param input;
4738 	struct dc_surface_dcc_cap output;
4739 
4740 	memset(&input, 0, sizeof(input));
4741 	memset(&output, 0, sizeof(output));
4742 
4743 	if (!dcc->enable)
4744 		return 0;
4745 
4746 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4747 	    !dc->cap_funcs.get_dcc_compression_cap)
4748 		return -EINVAL;
4749 
4750 	input.format = format;
4751 	input.surface_size.width = plane_size->surface_size.width;
4752 	input.surface_size.height = plane_size->surface_size.height;
4753 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4754 
4755 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4756 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4757 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4758 		input.scan = SCAN_DIRECTION_VERTICAL;
4759 
4760 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4761 		return -EINVAL;
4762 
4763 	if (!output.capable)
4764 		return -EINVAL;
4765 
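	/* The DCC parameters from the modifier must provide independent 64B
	 * blocks whenever the DC capability check asks for them.
	 */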
4766 	if (dcc->independent_64b_blks == 0 &&
4767 	    output.grph.rgb.independent_64b_blks != 0)
4768 		return -EINVAL;
4769 
4770 	return 0;
4771 }
4772 
4773 static bool
4774 modifier_has_dcc(uint64_t modifier)
4775 {
4776 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4777 }
4778 
4779 static unsigned
4780 modifier_gfx9_swizzle_mode(uint64_t modifier)
4781 {
4782 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4783 		return 0;
4784 
4785 	return AMD_FMT_MOD_GET(TILE, modifier);
4786 }
4787 
4788 static const struct drm_format_info *
4789 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4790 {
4791 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4792 }
4793 
4794 static void
4795 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4796 				    union dc_tiling_info *tiling_info,
4797 				    uint64_t modifier)
4798 {
4799 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4800 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4801 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4802 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4803 
4804 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4805 
4806 	if (!IS_AMD_FMT_MOD(modifier))
4807 		return;
4808 
4809 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4810 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4811 
4812 	if (adev->family >= AMDGPU_FAMILY_NV) {
4813 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4814 	} else {
4815 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4816 
4817 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4818 	}
4819 }
4820 
4821 enum dm_micro_swizzle {
4822 	MICRO_SWIZZLE_Z = 0,
4823 	MICRO_SWIZZLE_S = 1,
4824 	MICRO_SWIZZLE_D = 2,
4825 	MICRO_SWIZZLE_R = 3
4826 };
4827 
4828 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4829 					  uint32_t format,
4830 					  uint64_t modifier)
4831 {
4832 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4833 	const struct drm_format_info *info = drm_format_info(format);
4834 	int i;
4835 
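	/* The two low bits of the GFX9+ swizzle mode select the micro-tile
	 * ordering, matching the dm_micro_swizzle values above.
	 */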
4836 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4837 
4838 	if (!info)
4839 		return false;
4840 
4841 	/*
4842 	 * We always have to allow these modifiers:
4843 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4844 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4845 	 */
4846 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4847 	    modifier == DRM_FORMAT_MOD_INVALID) {
4848 		return true;
4849 	}
4850 
4851 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4852 	for (i = 0; i < plane->modifier_count; i++) {
4853 		if (modifier == plane->modifiers[i])
4854 			break;
4855 	}
4856 	if (i == plane->modifier_count)
4857 		return false;
4858 
4859 	/*
4860 	 * For D swizzle the canonical modifier depends on the bpp, so check
4861 	 * it here.
4862 	 */
4863 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4864 	    adev->family >= AMDGPU_FAMILY_NV) {
4865 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4866 			return false;
4867 	}
4868 
4869 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4870 	    info->cpp[0] < 8)
4871 		return false;
4872 
4873 	if (modifier_has_dcc(modifier)) {
4874 		/* Per radeonsi comments, 16/64 bpp are more complicated. */
4875 		if (info->cpp[0] != 4)
4876 			return false;
4877 		/* We support multi-planar formats, but not when combined with
4878 		 * additional DCC metadata planes. */
4879 		if (info->num_planes > 1)
4880 			return false;
4881 	}
4882 
4883 	return true;
4884 }
4885 
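/*
 * Append a modifier to a kmalloc'ed array, doubling its capacity when full.
 * On allocation failure the array is freed and *mods is set to NULL, which
 * subsequent calls (and the caller) treat as "no modifier list".
 */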
4886 static void
4887 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4888 {
4889 	if (!*mods)
4890 		return;
4891 
4892 	if (*cap - *size < 1) {
4893 		uint64_t new_cap = *cap * 2;
4894 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4895 
4896 		if (!new_mods) {
4897 			kfree(*mods);
4898 			*mods = NULL;
4899 			return;
4900 		}
4901 
4902 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4903 		kfree(*mods);
4904 		*mods = new_mods;
4905 		*cap = new_cap;
4906 	}
4907 
4908 	(*mods)[*size] = mod;
4909 	*size += 1;
4910 }
4911 
4912 static void
4913 add_gfx9_modifiers(const struct amdgpu_device *adev,
4914 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4915 {
4916 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4917 	int pipe_xor_bits = min(8, pipes +
4918 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4919 	int bank_xor_bits = min(8 - pipe_xor_bits,
4920 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4921 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4922 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4923 
4924 
4925 	if (adev->family == AMDGPU_FAMILY_RV) {
4926 		/* Raven2 and later */
4927 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4928 
4929 		/*
4930 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4931 		 * doesn't support _D on DCN
4932 		 */
4933 
4934 		if (has_constant_encode) {
4935 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4936 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4937 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4938 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4939 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4940 				    AMD_FMT_MOD_SET(DCC, 1) |
4941 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4942 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4943 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4944 		}
4945 
4946 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4947 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4948 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4949 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4950 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4951 			    AMD_FMT_MOD_SET(DCC, 1) |
4952 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4953 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4954 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4955 
4956 		if (has_constant_encode) {
4957 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4958 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4959 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4960 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4961 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4962 				    AMD_FMT_MOD_SET(DCC, 1) |
4963 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4964 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4965 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4966 
4967 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4968 				    AMD_FMT_MOD_SET(RB, rb) |
4969 				    AMD_FMT_MOD_SET(PIPE, pipes));
4970 		}
4971 
4972 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4973 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4974 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4975 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4976 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4977 			    AMD_FMT_MOD_SET(DCC, 1) |
4978 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4979 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4980 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4981 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4982 			    AMD_FMT_MOD_SET(RB, rb) |
4983 			    AMD_FMT_MOD_SET(PIPE, pipes));
4984 	}
4985 
4986 	/*
4987 	 * Only supported for 64bpp on Raven, will be filtered on format in
4988 	 * dm_plane_format_mod_supported.
4989 	 */
4990 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4991 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4992 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4993 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4994 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4995 
4996 	if (adev->family == AMDGPU_FAMILY_RV) {
4997 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4998 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4999 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5000 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5001 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5002 	}
5003 
5004 	/*
5005 	 * Only supported for 64bpp on Raven, will be filtered on format in
5006 	 * dm_plane_format_mod_supported.
5007 	 */
5008 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5009 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5010 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5011 
5012 	if (adev->family == AMDGPU_FAMILY_RV) {
5013 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5014 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5015 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5016 	}
5017 }
5018 
5019 static void
5020 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5021 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5022 {
5023 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5024 
5025 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5026 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5027 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5028 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5029 		    AMD_FMT_MOD_SET(DCC, 1) |
5030 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5031 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5032 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5033 
5034 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5035 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5036 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5037 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5038 		    AMD_FMT_MOD_SET(DCC, 1) |
5039 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5040 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5041 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5042 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5043 
5044 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5045 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5046 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5047 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5048 
5049 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5050 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5051 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5052 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5053 
5054 
5055 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5056 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5057 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5058 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5059 
5060 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5061 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5062 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5063 }
5064 
5065 static void
5066 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5067 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5068 {
5069 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5070 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5071 
5072 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5073 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5074 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5075 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5076 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5077 		    AMD_FMT_MOD_SET(DCC, 1) |
5078 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5079 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5080 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5081 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5082 
5083 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5084 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5085 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5086 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5087 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5088 		    AMD_FMT_MOD_SET(DCC, 1) |
5089 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5090 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5091 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5092 
5093 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5094 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5095 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5096 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5097 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5098 		    AMD_FMT_MOD_SET(DCC, 1) |
5099 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5100 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5101 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5102 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5103 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5104 
5105 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5106 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5107 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5108 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5109 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5110 		    AMD_FMT_MOD_SET(DCC, 1) |
5111 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5112 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5113 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5114 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5115 
5116 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5117 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5118 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5119 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5120 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5121 
5122 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5123 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5124 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5125 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5126 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5127 
5128 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5129 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5130 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5131 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5132 
5133 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5134 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5135 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5136 }
5137 
5138 static int
5139 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5140 {
5141 	uint64_t size = 0, capacity = 128;
5142 	*mods = NULL;
5143 
5144 	/* We have not hooked up any pre-GFX9 modifiers. */
5145 	if (adev->family < AMDGPU_FAMILY_AI)
5146 		return 0;
5147 
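	/*
	 * A failed allocation is tolerated here: the list is checked for NULL
	 * before returning and -ENOMEM is reported in that case.
	 */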
5148 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5149 
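	/* Cursor planes only advertise the linear modifier. */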
5150 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5151 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5152 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5153 		return *mods ? 0 : -ENOMEM;
5154 	}
5155 
5156 	switch (adev->family) {
5157 	case AMDGPU_FAMILY_AI:
5158 	case AMDGPU_FAMILY_RV:
5159 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5160 		break;
5161 	case AMDGPU_FAMILY_NV:
5162 	case AMDGPU_FAMILY_VGH:
5163 	case AMDGPU_FAMILY_YC:
5164 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5165 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5166 		else
5167 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5168 		break;
5169 	}
5170 
5171 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5172 
5173 	/* INVALID marks the end of the list. */
5174 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5175 
5176 	if (!*mods)
5177 		return -ENOMEM;
5178 
5179 	return 0;
5180 }
5181 
5182 static int
5183 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5184 					  const struct amdgpu_framebuffer *afb,
5185 					  const enum surface_pixel_format format,
5186 					  const enum dc_rotation_angle rotation,
5187 					  const struct plane_size *plane_size,
5188 					  union dc_tiling_info *tiling_info,
5189 					  struct dc_plane_dcc_param *dcc,
5190 					  struct dc_plane_address *address,
5191 					  const bool force_disable_dcc)
5192 {
5193 	const uint64_t modifier = afb->base.modifier;
5194 	int ret = 0;
5195 
5196 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5197 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5198 
5199 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5200 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5201 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5202 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5203 
5204 		dcc->enable = 1;
5205 		dcc->meta_pitch = afb->base.pitches[1];
5206 		dcc->independent_64b_blks = independent_64b_blks;
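		/*
		 * gfx10.3 (RB+) surfaces can combine 64B and 128B independent
		 * blocks; map the modifier bits onto the hubp DCC block size.
		 */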
5207 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5208 			if (independent_64b_blks && independent_128b_blks)
5209 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5210 			else if (independent_128b_blks)
5211 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5212 			else if (independent_64b_blks && !independent_128b_blks)
5213 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5214 			else
5215 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5216 		} else {
5217 			if (independent_64b_blks)
5218 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5219 			else
5220 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5221 		}
5222 
5223 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5224 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5225 	}
5226 
5227 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5228 	if (ret)
5229 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5230 
5231 	return ret;
5232 }
5233 
5234 static int
5235 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5236 			     const struct amdgpu_framebuffer *afb,
5237 			     const enum surface_pixel_format format,
5238 			     const enum dc_rotation_angle rotation,
5239 			     const uint64_t tiling_flags,
5240 			     union dc_tiling_info *tiling_info,
5241 			     struct plane_size *plane_size,
5242 			     struct dc_plane_dcc_param *dcc,
5243 			     struct dc_plane_address *address,
5244 			     bool tmz_surface,
5245 			     bool force_disable_dcc)
5246 {
5247 	const struct drm_framebuffer *fb = &afb->base;
5248 	int ret;
5249 
5250 	memset(tiling_info, 0, sizeof(*tiling_info));
5251 	memset(plane_size, 0, sizeof(*plane_size));
5252 	memset(dcc, 0, sizeof(*dcc));
5253 	memset(address, 0, sizeof(*address));
5254 
5255 	address->tmz_surface = tmz_surface;
5256 
5257 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5258 		uint64_t addr = afb->address + fb->offsets[0];
5259 
5260 		plane_size->surface_size.x = 0;
5261 		plane_size->surface_size.y = 0;
5262 		plane_size->surface_size.width = fb->width;
5263 		plane_size->surface_size.height = fb->height;
5264 		plane_size->surface_pitch =
5265 			fb->pitches[0] / fb->format->cpp[0];
5266 
5267 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5268 		address->grph.addr.low_part = lower_32_bits(addr);
5269 		address->grph.addr.high_part = upper_32_bits(addr);
5270 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5271 		uint64_t luma_addr = afb->address + fb->offsets[0];
5272 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5273 
5274 		plane_size->surface_size.x = 0;
5275 		plane_size->surface_size.y = 0;
5276 		plane_size->surface_size.width = fb->width;
5277 		plane_size->surface_size.height = fb->height;
5278 		plane_size->surface_pitch =
5279 			fb->pitches[0] / fb->format->cpp[0];
5280 
5281 		plane_size->chroma_size.x = 0;
5282 		plane_size->chroma_size.y = 0;
5283 		/* TODO: set these based on surface format */
5284 		plane_size->chroma_size.width = fb->width / 2;
5285 		plane_size->chroma_size.height = fb->height / 2;
5286 
5287 		plane_size->chroma_pitch =
5288 			fb->pitches[1] / fb->format->cpp[1];
5289 
5290 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5291 		address->video_progressive.luma_addr.low_part =
5292 			lower_32_bits(luma_addr);
5293 		address->video_progressive.luma_addr.high_part =
5294 			upper_32_bits(luma_addr);
5295 		address->video_progressive.chroma_addr.low_part =
5296 			lower_32_bits(chroma_addr);
5297 		address->video_progressive.chroma_addr.high_part =
5298 			upper_32_bits(chroma_addr);
5299 	}
5300 
5301 	if (adev->family >= AMDGPU_FAMILY_AI) {
5302 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5303 								rotation, plane_size,
5304 								tiling_info, dcc,
5305 								address,
5306 								force_disable_dcc);
5307 		if (ret)
5308 			return ret;
5309 	} else {
5310 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5311 	}
5312 
5313 	return 0;
5314 }
5315 
5316 static void
5317 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5318 			       bool *per_pixel_alpha, bool *global_alpha,
5319 			       int *global_alpha_value)
5320 {
5321 	*per_pixel_alpha = false;
5322 	*global_alpha = false;
5323 	*global_alpha_value = 0xff;
5324 
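	/* Blending properties are only honoured for overlay planes. */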
5325 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5326 		return;
5327 
5328 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5329 		static const uint32_t alpha_formats[] = {
5330 			DRM_FORMAT_ARGB8888,
5331 			DRM_FORMAT_RGBA8888,
5332 			DRM_FORMAT_ABGR8888,
5333 		};
5334 		uint32_t format = plane_state->fb->format->format;
5335 		unsigned int i;
5336 
5337 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5338 			if (format == alpha_formats[i]) {
5339 				*per_pixel_alpha = true;
5340 				break;
5341 			}
5342 		}
5343 	}
5344 
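	/*
	 * The DRM plane alpha property is 16 bits (0xffff == opaque), while DC
	 * takes an 8-bit global alpha, hence the shift below.
	 */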
5345 	if (plane_state->alpha < 0xffff) {
5346 		*global_alpha = true;
5347 		*global_alpha_value = plane_state->alpha >> 8;
5348 	}
5349 }
5350 
5351 static int
5352 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5353 			    const enum surface_pixel_format format,
5354 			    enum dc_color_space *color_space)
5355 {
5356 	bool full_range;
5357 
5358 	*color_space = COLOR_SPACE_SRGB;
5359 
5360 	/* DRM color properties only affect non-RGB formats. */
5361 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5362 		return 0;
5363 
5364 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5365 
5366 	switch (plane_state->color_encoding) {
5367 	case DRM_COLOR_YCBCR_BT601:
5368 		if (full_range)
5369 			*color_space = COLOR_SPACE_YCBCR601;
5370 		else
5371 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5372 		break;
5373 
5374 	case DRM_COLOR_YCBCR_BT709:
5375 		if (full_range)
5376 			*color_space = COLOR_SPACE_YCBCR709;
5377 		else
5378 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5379 		break;
5380 
5381 	case DRM_COLOR_YCBCR_BT2020:
5382 		if (full_range)
5383 			*color_space = COLOR_SPACE_2020_YCBCR;
5384 		else
5385 			return -EINVAL;
5386 		break;
5387 
5388 	default:
5389 		return -EINVAL;
5390 	}
5391 
5392 	return 0;
5393 }
5394 
5395 static int
5396 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5397 			    const struct drm_plane_state *plane_state,
5398 			    const uint64_t tiling_flags,
5399 			    struct dc_plane_info *plane_info,
5400 			    struct dc_plane_address *address,
5401 			    bool tmz_surface,
5402 			    bool force_disable_dcc)
5403 {
5404 	const struct drm_framebuffer *fb = plane_state->fb;
5405 	const struct amdgpu_framebuffer *afb =
5406 		to_amdgpu_framebuffer(plane_state->fb);
5407 	int ret;
5408 
5409 	memset(plane_info, 0, sizeof(*plane_info));
5410 
5411 	switch (fb->format->format) {
5412 	case DRM_FORMAT_C8:
5413 		plane_info->format =
5414 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5415 		break;
5416 	case DRM_FORMAT_RGB565:
5417 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5418 		break;
5419 	case DRM_FORMAT_XRGB8888:
5420 	case DRM_FORMAT_ARGB8888:
5421 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5422 		break;
5423 	case DRM_FORMAT_XRGB2101010:
5424 	case DRM_FORMAT_ARGB2101010:
5425 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5426 		break;
5427 	case DRM_FORMAT_XBGR2101010:
5428 	case DRM_FORMAT_ABGR2101010:
5429 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5430 		break;
5431 	case DRM_FORMAT_XBGR8888:
5432 	case DRM_FORMAT_ABGR8888:
5433 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5434 		break;
5435 	case DRM_FORMAT_NV21:
5436 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5437 		break;
5438 	case DRM_FORMAT_NV12:
5439 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5440 		break;
5441 	case DRM_FORMAT_P010:
5442 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5443 		break;
5444 	case DRM_FORMAT_XRGB16161616F:
5445 	case DRM_FORMAT_ARGB16161616F:
5446 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5447 		break;
5448 	case DRM_FORMAT_XBGR16161616F:
5449 	case DRM_FORMAT_ABGR16161616F:
5450 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5451 		break;
5452 	case DRM_FORMAT_XRGB16161616:
5453 	case DRM_FORMAT_ARGB16161616:
5454 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5455 		break;
5456 	case DRM_FORMAT_XBGR16161616:
5457 	case DRM_FORMAT_ABGR16161616:
5458 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5459 		break;
5460 	default:
5461 		DRM_ERROR(
5462 			"Unsupported screen format %p4cc\n",
5463 			&fb->format->format);
5464 		return -EINVAL;
5465 	}
5466 
5467 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5468 	case DRM_MODE_ROTATE_0:
5469 		plane_info->rotation = ROTATION_ANGLE_0;
5470 		break;
5471 	case DRM_MODE_ROTATE_90:
5472 		plane_info->rotation = ROTATION_ANGLE_90;
5473 		break;
5474 	case DRM_MODE_ROTATE_180:
5475 		plane_info->rotation = ROTATION_ANGLE_180;
5476 		break;
5477 	case DRM_MODE_ROTATE_270:
5478 		plane_info->rotation = ROTATION_ANGLE_270;
5479 		break;
5480 	default:
5481 		plane_info->rotation = ROTATION_ANGLE_0;
5482 		break;
5483 	}
5484 
5485 	plane_info->visible = true;
5486 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5487 
5488 	plane_info->layer_index = 0;
5489 
5490 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5491 					  &plane_info->color_space);
5492 	if (ret)
5493 		return ret;
5494 
5495 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5496 					   plane_info->rotation, tiling_flags,
5497 					   &plane_info->tiling_info,
5498 					   &plane_info->plane_size,
5499 					   &plane_info->dcc, address, tmz_surface,
5500 					   force_disable_dcc);
5501 	if (ret)
5502 		return ret;
5503 
5504 	fill_blending_from_plane_state(
5505 		plane_state, &plane_info->per_pixel_alpha,
5506 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5507 
5508 	return 0;
5509 }
5510 
5511 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5512 				    struct dc_plane_state *dc_plane_state,
5513 				    struct drm_plane_state *plane_state,
5514 				    struct drm_crtc_state *crtc_state)
5515 {
5516 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5517 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5518 	struct dc_scaling_info scaling_info;
5519 	struct dc_plane_info plane_info;
5520 	int ret;
5521 	bool force_disable_dcc = false;
5522 
5523 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5524 	if (ret)
5525 		return ret;
5526 
5527 	dc_plane_state->src_rect = scaling_info.src_rect;
5528 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5529 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5530 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5531 
5532 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5533 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5534 					  afb->tiling_flags,
5535 					  &plane_info,
5536 					  &dc_plane_state->address,
5537 					  afb->tmz_surface,
5538 					  force_disable_dcc);
5539 	if (ret)
5540 		return ret;
5541 
5542 	dc_plane_state->format = plane_info.format;
5543 	dc_plane_state->color_space = plane_info.color_space;
5545 	dc_plane_state->plane_size = plane_info.plane_size;
5546 	dc_plane_state->rotation = plane_info.rotation;
5547 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5548 	dc_plane_state->stereo_format = plane_info.stereo_format;
5549 	dc_plane_state->tiling_info = plane_info.tiling_info;
5550 	dc_plane_state->visible = plane_info.visible;
5551 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5552 	dc_plane_state->global_alpha = plane_info.global_alpha;
5553 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5554 	dc_plane_state->dcc = plane_info.dcc;
5555 	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5556 	dc_plane_state->flip_int_enabled = true;
5557 
5558 	/*
5559 	 * Always set input transfer function, since plane state is refreshed
5560 	 * every time.
5561 	 */
5562 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5563 	if (ret)
5564 		return ret;
5565 
5566 	return 0;
5567 }
5568 
5569 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5570 					   const struct dm_connector_state *dm_state,
5571 					   struct dc_stream_state *stream)
5572 {
5573 	enum amdgpu_rmx_type rmx_type;
5574 
5575 	struct rect src = { 0 }; /* viewport in composition space */
5576 	struct rect dst = { 0 }; /* stream addressable area */
5577 
5578 	/* no mode. nothing to be done */
5579 	if (!mode)
5580 		return;
5581 
5582 	/* Full screen scaling by default */
5583 	src.width = mode->hdisplay;
5584 	src.height = mode->vdisplay;
5585 	dst.width = stream->timing.h_addressable;
5586 	dst.height = stream->timing.v_addressable;
5587 
5588 	if (dm_state) {
5589 		rmx_type = dm_state->scaling;
5590 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5591 			if (src.width * dst.height <
5592 					src.height * dst.width) {
5593 				/* height needs less upscaling/more downscaling */
5594 				dst.width = src.width *
5595 						dst.height / src.height;
5596 			} else {
5597 				/* width needs less upscaling/more downscaling */
5598 				dst.height = src.height *
5599 						dst.width / src.width;
5600 			}
5601 		} else if (rmx_type == RMX_CENTER) {
5602 			dst = src;
5603 		}
5604 
5605 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5606 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5607 
5608 		if (dm_state->underscan_enable) {
5609 			dst.x += dm_state->underscan_hborder / 2;
5610 			dst.y += dm_state->underscan_vborder / 2;
5611 			dst.width -= dm_state->underscan_hborder;
5612 			dst.height -= dm_state->underscan_vborder;
5613 		}
5614 	}
5615 
5616 	stream->src = src;
5617 	stream->dst = dst;
5618 
5619 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5620 		      dst.x, dst.y, dst.width, dst.height);
5621 
5622 }
5623 
5624 static enum dc_color_depth
5625 convert_color_depth_from_display_info(const struct drm_connector *connector,
5626 				      bool is_y420, int requested_bpc)
5627 {
5628 	uint8_t bpc;
5629 
5630 	if (is_y420) {
5631 		bpc = 8;
5632 
5633 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5634 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5635 			bpc = 16;
5636 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5637 			bpc = 12;
5638 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5639 			bpc = 10;
5640 	} else {
5641 		bpc = (uint8_t)connector->display_info.bpc;
5642 		/* Assume 8 bpc by default if no bpc is specified. */
5643 		bpc = bpc ? bpc : 8;
5644 	}
5645 
5646 	if (requested_bpc > 0) {
5647 		/*
5648 		 * Cap display bpc based on the user requested value.
5649 		 *
5650 		 * The value for state->max_bpc may not be correctly updated
5651 		 * depending on when the connector gets added to the state
5652 		 * or if this was called outside of atomic check, so it
5653 		 * can't be used directly.
5654 		 */
5655 		bpc = min_t(u8, bpc, requested_bpc);
5656 
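		/* DC only handles even colour depths (see the switch below). */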
5657 		/* Round down to the nearest even number. */
5658 		bpc = bpc - (bpc & 1);
5659 	}
5660 
5661 	switch (bpc) {
5662 	case 0:
5663 		/*
5664 		 * Temporary workaround: DRM doesn't parse color depth for
5665 		 * EDID revisions before 1.4.
5666 		 * TODO: Fix EDID parsing
5667 		 */
5668 		return COLOR_DEPTH_888;
5669 	case 6:
5670 		return COLOR_DEPTH_666;
5671 	case 8:
5672 		return COLOR_DEPTH_888;
5673 	case 10:
5674 		return COLOR_DEPTH_101010;
5675 	case 12:
5676 		return COLOR_DEPTH_121212;
5677 	case 14:
5678 		return COLOR_DEPTH_141414;
5679 	case 16:
5680 		return COLOR_DEPTH_161616;
5681 	default:
5682 		return COLOR_DEPTH_UNDEFINED;
5683 	}
5684 }
5685 
5686 static enum dc_aspect_ratio
5687 get_aspect_ratio(const struct drm_display_mode *mode_in)
5688 {
5689 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5690 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5691 }
5692 
5693 static enum dc_color_space
5694 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5695 {
5696 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5697 
5698 	switch (dc_crtc_timing->pixel_encoding)	{
5699 	case PIXEL_ENCODING_YCBCR422:
5700 	case PIXEL_ENCODING_YCBCR444:
5701 	case PIXEL_ENCODING_YCBCR420:
5702 	{
5703 		/*
5704 		 * 27.03 MHz (a pix_clk_100hz of 270300) is the separation point
5705 		 * between HDTV and SDTV according to the HDMI spec; use
5706 		 * YCbCr709 above it and YCbCr601 below it.
5707 		 */
5708 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5709 			if (dc_crtc_timing->flags.Y_ONLY)
5710 				color_space =
5711 					COLOR_SPACE_YCBCR709_LIMITED;
5712 			else
5713 				color_space = COLOR_SPACE_YCBCR709;
5714 		} else {
5715 			if (dc_crtc_timing->flags.Y_ONLY)
5716 				color_space =
5717 					COLOR_SPACE_YCBCR601_LIMITED;
5718 			else
5719 				color_space = COLOR_SPACE_YCBCR601;
5720 		}
5721 
5722 	}
5723 	break;
5724 	case PIXEL_ENCODING_RGB:
5725 		color_space = COLOR_SPACE_SRGB;
5726 		break;
5727 
5728 	default:
5729 		WARN_ON(1);
5730 		break;
5731 	}
5732 
5733 	return color_space;
5734 }
5735 
5736 static bool adjust_colour_depth_from_display_info(
5737 	struct dc_crtc_timing *timing_out,
5738 	const struct drm_display_info *info)
5739 {
5740 	enum dc_color_depth depth = timing_out->display_color_depth;
5741 	int normalized_clk;
5742 	do {
5743 		normalized_clk = timing_out->pix_clk_100hz / 10;
5744 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5745 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5746 			normalized_clk /= 2;
5747 		/* Adjust the pixel clock per the HDMI spec based on colour depth. */
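		/* Scale the 24 bpp (8 bpc) baseline clock by the bits per pixel. */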
5748 		switch (depth) {
5749 		case COLOR_DEPTH_888:
5750 			break;
5751 		case COLOR_DEPTH_101010:
5752 			normalized_clk = (normalized_clk * 30) / 24;
5753 			break;
5754 		case COLOR_DEPTH_121212:
5755 			normalized_clk = (normalized_clk * 36) / 24;
5756 			break;
5757 		case COLOR_DEPTH_161616:
5758 			normalized_clk = (normalized_clk * 48) / 24;
5759 			break;
5760 		default:
5761 			/* The above depths are the only ones valid for HDMI. */
5762 			return false;
5763 		}
5764 		if (normalized_clk <= info->max_tmds_clock) {
5765 			timing_out->display_color_depth = depth;
5766 			return true;
5767 		}
5768 	} while (--depth > COLOR_DEPTH_666);
5769 	return false;
5770 }
5771 
5772 static void fill_stream_properties_from_drm_display_mode(
5773 	struct dc_stream_state *stream,
5774 	const struct drm_display_mode *mode_in,
5775 	const struct drm_connector *connector,
5776 	const struct drm_connector_state *connector_state,
5777 	const struct dc_stream_state *old_stream,
5778 	int requested_bpc)
5779 {
5780 	struct dc_crtc_timing *timing_out = &stream->timing;
5781 	const struct drm_display_info *info = &connector->display_info;
5782 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5783 	struct hdmi_vendor_infoframe hv_frame;
5784 	struct hdmi_avi_infoframe avi_frame;
5785 
5786 	memset(&hv_frame, 0, sizeof(hv_frame));
5787 	memset(&avi_frame, 0, sizeof(avi_frame));
5788 
5789 	timing_out->h_border_left = 0;
5790 	timing_out->h_border_right = 0;
5791 	timing_out->v_border_top = 0;
5792 	timing_out->v_border_bottom = 0;
5793 	/* TODO: un-hardcode */
5794 	if (drm_mode_is_420_only(info, mode_in)
5795 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5796 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5797 	else if (drm_mode_is_420_also(info, mode_in)
5798 			&& aconnector->force_yuv420_output)
5799 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5800 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5801 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5802 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5803 	else
5804 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5805 
5806 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5807 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5808 		connector,
5809 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5810 		requested_bpc);
5811 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5812 	timing_out->hdmi_vic = 0;
5813 
5814 	if (old_stream) {
5815 		timing_out->vic = old_stream->timing.vic;
5816 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5817 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5818 	} else {
5819 		timing_out->vic = drm_match_cea_mode(mode_in);
5820 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5821 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5822 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5823 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5824 	}
5825 
5826 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5827 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5828 		timing_out->vic = avi_frame.video_code;
5829 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5830 		timing_out->hdmi_vic = hv_frame.vic;
5831 	}
5832 
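	/*
	 * FreeSync video modes are programmed from the base timings; all other
	 * modes use the crtc_-adjusted timings.
	 */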
5833 	if (is_freesync_video_mode(mode_in, aconnector)) {
5834 		timing_out->h_addressable = mode_in->hdisplay;
5835 		timing_out->h_total = mode_in->htotal;
5836 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5837 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5838 		timing_out->v_total = mode_in->vtotal;
5839 		timing_out->v_addressable = mode_in->vdisplay;
5840 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5841 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5842 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5843 	} else {
5844 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5845 		timing_out->h_total = mode_in->crtc_htotal;
5846 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5847 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5848 		timing_out->v_total = mode_in->crtc_vtotal;
5849 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5850 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5851 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5852 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5853 	}
5854 
5855 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5856 
5857 	stream->output_color_space = get_output_color_space(timing_out);
5858 
5859 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5860 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5861 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5862 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5863 		    drm_mode_is_420_also(info, mode_in) &&
5864 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5865 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5866 			adjust_colour_depth_from_display_info(timing_out, info);
5867 		}
5868 	}
5869 }
5870 
5871 static void fill_audio_info(struct audio_info *audio_info,
5872 			    const struct drm_connector *drm_connector,
5873 			    const struct dc_sink *dc_sink)
5874 {
5875 	int i = 0;
5876 	int cea_revision = 0;
5877 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5878 
5879 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5880 	audio_info->product_id = edid_caps->product_id;
5881 
5882 	cea_revision = drm_connector->display_info.cea_rev;
5883 
5884 	strscpy(audio_info->display_name,
5885 		edid_caps->display_name,
5886 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5887 
5888 	if (cea_revision >= 3) {
5889 		audio_info->mode_count = edid_caps->audio_mode_count;
5890 
5891 		for (i = 0; i < audio_info->mode_count; ++i) {
5892 			audio_info->modes[i].format_code =
5893 					(enum audio_format_code)
5894 					(edid_caps->audio_modes[i].format_code);
5895 			audio_info->modes[i].channel_count =
5896 					edid_caps->audio_modes[i].channel_count;
5897 			audio_info->modes[i].sample_rates.all =
5898 					edid_caps->audio_modes[i].sample_rate;
5899 			audio_info->modes[i].sample_size =
5900 					edid_caps->audio_modes[i].sample_size;
5901 		}
5902 	}
5903 
5904 	audio_info->flags.all = edid_caps->speaker_flags;
5905 
5906 	/* TODO: We only check for progressive mode; check for interlaced mode too */
5907 	if (drm_connector->latency_present[0]) {
5908 		audio_info->video_latency = drm_connector->video_latency[0];
5909 		audio_info->audio_latency = drm_connector->audio_latency[0];
5910 	}
5911 
5912 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5913 
5914 }
5915 
5916 static void
5917 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5918 				      struct drm_display_mode *dst_mode)
5919 {
5920 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5921 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5922 	dst_mode->crtc_clock = src_mode->crtc_clock;
5923 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5924 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5925 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5926 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5927 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5928 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5929 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5930 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5931 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5932 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5933 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5934 }
5935 
5936 static void
5937 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5938 					const struct drm_display_mode *native_mode,
5939 					bool scale_enabled)
5940 {
5941 	if (scale_enabled) {
5942 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5943 	} else if (native_mode->clock == drm_mode->clock &&
5944 			native_mode->htotal == drm_mode->htotal &&
5945 			native_mode->vtotal == drm_mode->vtotal) {
5946 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5947 	} else {
5948 		/* Neither scaling nor an amdgpu-inserted mode; nothing to patch. */
5949 	}
5950 }
5951 
5952 static struct dc_sink *
5953 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5954 {
5955 	struct dc_sink_init_data sink_init_data = { 0 };
5956 	struct dc_sink *sink = NULL;
5957 	sink_init_data.link = aconnector->dc_link;
5958 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5959 
5960 	sink = dc_sink_create(&sink_init_data);
5961 	if (!sink) {
5962 		DRM_ERROR("Failed to create sink!\n");
5963 		return NULL;
5964 	}
5965 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5966 
5967 	return sink;
5968 }
5969 
5970 static void set_multisync_trigger_params(
5971 		struct dc_stream_state *stream)
5972 {
5973 	struct dc_stream_state *master = NULL;
5974 
5975 	if (stream->triggered_crtc_reset.enabled) {
5976 		master = stream->triggered_crtc_reset.event_source;
5977 		stream->triggered_crtc_reset.event =
5978 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5979 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5980 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5981 	}
5982 }
5983 
5984 static void set_master_stream(struct dc_stream_state *stream_set[],
5985 			      int stream_count)
5986 {
5987 	int j, highest_rfr = 0, master_stream = 0;
5988 
5989 	for (j = 0;  j < stream_count; j++) {
5990 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5991 			int refresh_rate = 0;
5992 
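			/*
			 * pix_clk_100hz is in 100 Hz units, so the refresh rate
			 * in Hz is pixel clock / (h_total * v_total).
			 */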
5993 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5994 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5995 			if (refresh_rate > highest_rfr) {
5996 				highest_rfr = refresh_rate;
5997 				master_stream = j;
5998 			}
5999 		}
6000 	}
6001 	for (j = 0;  j < stream_count; j++) {
6002 		if (stream_set[j])
6003 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6004 	}
6005 }
6006 
6007 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6008 {
6009 	int i = 0;
6010 	struct dc_stream_state *stream;
6011 
6012 	if (context->stream_count < 2)
6013 		return;
6014 	for (i = 0; i < context->stream_count ; i++) {
6015 		if (!context->streams[i])
6016 			continue;
6017 		/*
6018 		 * TODO: add a function to read AMD VSDB bits and set
6019 		 * crtc_sync_master.multi_sync_enabled flag
6020 		 * For now it's set to false
6021 		 */
6022 	}
6023 
6024 	set_master_stream(context->streams, context->stream_count);
6025 
6026 	for (i = 0; i < context->stream_count ; i++) {
6027 		stream = context->streams[i];
6028 
6029 		if (!stream)
6030 			continue;
6031 
6032 		set_multisync_trigger_params(stream);
6033 	}
6034 }
6035 
6036 #if defined(CONFIG_DRM_AMD_DC_DCN)
6037 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6038 							struct dc_sink *sink, struct dc_stream_state *stream,
6039 							struct dsc_dec_dpcd_caps *dsc_caps)
6040 {
6041 	stream->timing.flags.DSC = 0;
6042 
6043 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6044 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6045 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6046 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6047 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6048 				      dsc_caps);
6049 	}
6050 }
6051 
6052 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6053 				    struct dc_sink *sink, struct dc_stream_state *stream,
6054 				    struct dsc_dec_dpcd_caps *dsc_caps,
6055 				    uint32_t max_dsc_target_bpp_limit_override)
6056 {
6057 	const struct dc_link_settings *verified_link_cap = NULL;
6058 	uint32_t link_bw_in_kbps;
6059 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6060 	struct dc *dc = sink->ctx->dc;
6061 	struct dc_dsc_bw_range bw_range = {0};
6062 	struct dc_dsc_config dsc_cfg = {0};
6063 
6064 	verified_link_cap = dc_link_get_link_cap(stream->link);
6065 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
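	/* DSC target bpp bounds are in 1/16 bpp units; start both at 8 bpp. */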
6066 	edp_min_bpp_x16 = 8 * 16;
6067 	edp_max_bpp_x16 = 8 * 16;
6068 
6069 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6070 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6071 
6072 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6073 		edp_min_bpp_x16 = edp_max_bpp_x16;
6074 
6075 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6076 				dc->debug.dsc_min_slice_height_override,
6077 				edp_min_bpp_x16, edp_max_bpp_x16,
6078 				dsc_caps,
6079 				&stream->timing,
6080 				&bw_range)) {
6081 
6082 		if (bw_range.max_kbps < link_bw_in_kbps) {
6083 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6084 					dsc_caps,
6085 					dc->debug.dsc_min_slice_height_override,
6086 					max_dsc_target_bpp_limit_override,
6087 					0,
6088 					&stream->timing,
6089 					&dsc_cfg)) {
6090 				stream->timing.dsc_cfg = dsc_cfg;
6091 				stream->timing.flags.DSC = 1;
6092 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6093 			}
6094 			return;
6095 		}
6096 	}
6097 
6098 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6099 				dsc_caps,
6100 				dc->debug.dsc_min_slice_height_override,
6101 				max_dsc_target_bpp_limit_override,
6102 				link_bw_in_kbps,
6103 				&stream->timing,
6104 				&dsc_cfg)) {
6105 		stream->timing.dsc_cfg = dsc_cfg;
6106 		stream->timing.flags.DSC = 1;
6107 	}
6108 }
6109 
6110 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6111 										struct dc_sink *sink, struct dc_stream_state *stream,
6112 										struct dsc_dec_dpcd_caps *dsc_caps)
6113 {
6114 	struct drm_connector *drm_connector = &aconnector->base;
6115 	uint32_t link_bandwidth_kbps;
6116 	uint32_t max_dsc_target_bpp_limit_override = 0;
6117 	struct dc *dc = sink->ctx->dc;
6118 
6119 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6120 							dc_link_get_link_cap(aconnector->dc_link));
6121 
6122 	if (stream->link && stream->link->local_sink)
6123 		max_dsc_target_bpp_limit_override =
6124 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6125 
6126 	/* Set DSC policy according to dsc_clock_en */
6127 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6128 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6129 
6130 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6131 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6132 
6133 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6134 
6135 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6136 
6137 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6138 						dsc_caps,
6139 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6140 						max_dsc_target_bpp_limit_override,
6141 						link_bandwidth_kbps,
6142 						&stream->timing,
6143 						&stream->timing.dsc_cfg)) {
6144 			stream->timing.flags.DSC = 1;
6145 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6146 		}
6147 	}
6148 
6149 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6150 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6151 		stream->timing.flags.DSC = 1;
6152 
6153 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6154 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6155 
6156 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6157 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6158 
6159 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6160 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6161 }
6162 #endif /* CONFIG_DRM_AMD_DC_DCN */
6163 
6164 /**
6165  * DOC: FreeSync Video
6166  *
6167  * When a userspace application wants to play a video, the content follows a
6168  * standard format definition that usually specifies the FPS for that format.
6169  * The list below illustrates some video formats and the expected FPS,
6170  * respectively:
6171  *
6172  * - TV/NTSC (23.976 FPS)
6173  * - Cinema (24 FPS)
6174  * - TV/PAL (25 FPS)
6175  * - TV/NTSC (29.97 FPS)
6176  * - TV/NTSC (30 FPS)
6177  * - Cinema HFR (48 FPS)
6178  * - TV/PAL (50 FPS)
6179  * - Commonly used (60 FPS)
6180  * - Multiples of 24 (48,72,96,120 FPS)
6181  *
6182  * The list of standard video formats is not huge and can be added to the
6183  * connector modeset list beforehand. With that, userspace can leverage
6184  * FreeSync to extend the front porch in order to attain the target refresh
6185  * rate. Such a switch will happen seamlessly, without screen blanking or
6186  * reprogramming of the output in any other way. If userspace requests a
6187  * modesetting change compatible with FreeSync modes that only differ in the
6188  * refresh rate, DC will skip the full update and avoid a blink during the
6189  * transition. For example, a video player can change the modesetting from
6190  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6191  * causing any display blink. This same concept can be applied to a mode
6192  * setting change.
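 *
 * As a hypothetical example, a 1080p mode with a vtotal of 1125 lines at
 * 60 FPS can be exposed as a 30 FPS variant by extending v_front_porch so
 * that vtotal becomes 2250 lines, while the pixel clock and all other
 * timing parameters stay the same.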
6193  */
6194 static struct drm_display_mode *
6195 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6196 			  bool use_probed_modes)
6197 {
6198 	struct drm_display_mode *m, *m_pref = NULL;
6199 	u16 current_refresh, highest_refresh;
6200 	struct list_head *list_head = use_probed_modes ?
6201 						    &aconnector->base.probed_modes :
6202 						    &aconnector->base.modes;
6203 
6204 	if (aconnector->freesync_vid_base.clock != 0)
6205 		return &aconnector->freesync_vid_base;
6206 
6207 	/* Find the preferred mode */
6208 	list_for_each_entry (m, list_head, head) {
6209 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6210 			m_pref = m;
6211 			break;
6212 		}
6213 	}
6214 
6215 	if (!m_pref) {
6216 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6217 		m_pref = list_first_entry_or_null(
6218 			&aconnector->base.modes, struct drm_display_mode, head);
6219 		if (!m_pref) {
6220 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6221 			return NULL;
6222 		}
6223 	}
6224 
6225 	highest_refresh = drm_mode_vrefresh(m_pref);
6226 
6227 	/*
6228 	 * Find the mode with the highest refresh rate at the same resolution.
6229 	 * For some monitors, the preferred mode is not the mode with the
6230 	 * highest supported refresh rate.
6231 	 */
6232 	list_for_each_entry (m, list_head, head) {
6233 		current_refresh  = drm_mode_vrefresh(m);
6234 
6235 		if (m->hdisplay == m_pref->hdisplay &&
6236 		    m->vdisplay == m_pref->vdisplay &&
6237 		    highest_refresh < current_refresh) {
6238 			highest_refresh = current_refresh;
6239 			m_pref = m;
6240 		}
6241 	}
6242 
6243 	aconnector->freesync_vid_base = *m_pref;
6244 	return m_pref;
6245 }
6246 
6247 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6248 				   struct amdgpu_dm_connector *aconnector)
6249 {
6250 	struct drm_display_mode *high_mode;
6251 	int timing_diff;
6252 
6253 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6254 	if (!high_mode || !mode)
6255 		return false;
6256 
6257 	timing_diff = high_mode->vtotal - mode->vtotal;
6258 
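	/*
	 * A FreeSync video mode must match the base mode in everything except
	 * vertical blanking: vsync_start, vsync_end and vtotal may all shift
	 * by the same amount.
	 */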
6259 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6260 	    high_mode->hdisplay != mode->hdisplay ||
6261 	    high_mode->vdisplay != mode->vdisplay ||
6262 	    high_mode->hsync_start != mode->hsync_start ||
6263 	    high_mode->hsync_end != mode->hsync_end ||
6264 	    high_mode->htotal != mode->htotal ||
6265 	    high_mode->hskew != mode->hskew ||
6266 	    high_mode->vscan != mode->vscan ||
6267 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6268 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6269 		return false;
6270 	else
6271 		return true;
6272 }
6273 
6274 static struct dc_stream_state *
6275 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6276 		       const struct drm_display_mode *drm_mode,
6277 		       const struct dm_connector_state *dm_state,
6278 		       const struct dc_stream_state *old_stream,
6279 		       int requested_bpc)
6280 {
6281 	struct drm_display_mode *preferred_mode = NULL;
6282 	struct drm_connector *drm_connector;
6283 	const struct drm_connector_state *con_state =
6284 		dm_state ? &dm_state->base : NULL;
6285 	struct dc_stream_state *stream = NULL;
6286 	struct drm_display_mode mode = *drm_mode;
6287 	struct drm_display_mode saved_mode;
6288 	struct drm_display_mode *freesync_mode = NULL;
6289 	bool native_mode_found = false;
6290 	bool recalculate_timing = false;
6291 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6292 	int mode_refresh;
6293 	int preferred_refresh = 0;
6294 #if defined(CONFIG_DRM_AMD_DC_DCN)
6295 	struct dsc_dec_dpcd_caps dsc_caps;
6296 #endif
6297 	struct dc_sink *sink = NULL;
6298 
6299 	memset(&saved_mode, 0, sizeof(saved_mode));
6300 
6301 	if (aconnector == NULL) {
6302 		DRM_ERROR("aconnector is NULL!\n");
6303 		return stream;
6304 	}
6305 
6306 	drm_connector = &aconnector->base;
6307 
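	/* Without a physical sink, build the stream against a virtual sink. */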
6308 	if (!aconnector->dc_sink) {
6309 		sink = create_fake_sink(aconnector);
6310 		if (!sink)
6311 			return stream;
6312 	} else {
6313 		sink = aconnector->dc_sink;
6314 		dc_sink_retain(sink);
6315 	}
6316 
6317 	stream = dc_create_stream_for_sink(sink);
6318 
6319 	if (stream == NULL) {
6320 		DRM_ERROR("Failed to create stream for sink!\n");
6321 		goto finish;
6322 	}
6323 
6324 	stream->dm_stream_context = aconnector;
6325 
6326 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6327 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6328 
6329 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6330 		/* Search for preferred mode */
6331 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6332 			native_mode_found = true;
6333 			break;
6334 		}
6335 	}
6336 	if (!native_mode_found)
6337 		preferred_mode = list_first_entry_or_null(
6338 				&aconnector->base.modes,
6339 				struct drm_display_mode,
6340 				head);
6341 
6342 	mode_refresh = drm_mode_vrefresh(&mode);
6343 
6344 	if (preferred_mode == NULL) {
6345 		/*
6346 		 * This may not be an error: the use case is when there are no
6347 		 * usermode calls to reset and set the mode upon hotplug. In that
6348 		 * case, we call set mode ourselves to restore the previous mode,
6349 		 * and the mode list may not yet be filled in.
6350 		 */
6351 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6352 	} else {
6353 		recalculate_timing = amdgpu_freesync_vid_mode &&
6354 				 is_freesync_video_mode(&mode, aconnector);
6355 		if (recalculate_timing) {
6356 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6357 			saved_mode = mode;
6358 			mode = *freesync_mode;
6359 		} else {
6360 			decide_crtc_timing_for_drm_display_mode(
6361 				&mode, preferred_mode, scale);
6362 
6363 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6364 		}
6365 	}
6366 
6367 	if (recalculate_timing)
6368 		drm_mode_set_crtcinfo(&saved_mode, 0);
6369 	else if (!dm_state)
6370 		drm_mode_set_crtcinfo(&mode, 0);
6371 
6372 	/*
6373 	 * If scaling is enabled and the refresh rate didn't change,
6374 	 * we copy the vic and polarities of the old timings.
6375 	 */
6376 	if (!scale || mode_refresh != preferred_refresh)
6377 		fill_stream_properties_from_drm_display_mode(
6378 			stream, &mode, &aconnector->base, con_state, NULL,
6379 			requested_bpc);
6380 	else
6381 		fill_stream_properties_from_drm_display_mode(
6382 			stream, &mode, &aconnector->base, con_state, old_stream,
6383 			requested_bpc);
6384 
6385 #if defined(CONFIG_DRM_AMD_DC_DCN)
6386 	/* SST DSC determination policy */
6387 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6388 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6389 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6390 #endif
6391 
6392 	update_stream_scaling_settings(&mode, dm_state, stream);
6393 
6394 	fill_audio_info(
6395 		&stream->audio_info,
6396 		drm_connector,
6397 		sink);
6398 
6399 	update_stream_signal(stream, sink);
6400 
6401 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6402 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6403 
6404 	if (stream->link->psr_settings.psr_feature_enabled) {
6405 		/*
6406 		 * Decide whether the stream supports VSC SDP colorimetry
6407 		 * before building the VSC info packet.
6408 		 */
6409 		stream->use_vsc_sdp_for_colorimetry = false;
6410 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6411 			stream->use_vsc_sdp_for_colorimetry =
6412 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6413 		} else {
6414 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6415 				stream->use_vsc_sdp_for_colorimetry = true;
6416 		}
6417 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6418 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6419 
6420 	}
6421 finish:
6422 	dc_sink_release(sink);
6423 
6424 	return stream;
6425 }
6426 
6427 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6428 {
6429 	drm_crtc_cleanup(crtc);
6430 	kfree(crtc);
6431 }
6432 
6433 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6434 				  struct drm_crtc_state *state)
6435 {
6436 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6437 
6438 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6439 	if (cur->stream)
6440 		dc_stream_release(cur->stream);
6441 
6442 
6443 	__drm_atomic_helper_crtc_destroy_state(state);
6444 
6445 
6446 	kfree(state);
6447 }
6448 
6449 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6450 {
6451 	struct dm_crtc_state *state;
6452 
6453 	if (crtc->state)
6454 		dm_crtc_destroy_state(crtc, crtc->state);
6455 
6456 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6457 	if (WARN_ON(!state))
6458 		return;
6459 
6460 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6461 }
6462 
6463 static struct drm_crtc_state *
6464 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6465 {
6466 	struct dm_crtc_state *state, *cur;
6467 
6468 	cur = to_dm_crtc_state(crtc->state);
6469 
6470 	if (WARN_ON(!crtc->state))
6471 		return NULL;
6472 
6473 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6474 	if (!state)
6475 		return NULL;
6476 
6477 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6478 
6479 	if (cur->stream) {
6480 		state->stream = cur->stream;
6481 		dc_stream_retain(state->stream);
6482 	}
6483 
6484 	state->active_planes = cur->active_planes;
6485 	state->vrr_infopacket = cur->vrr_infopacket;
6486 	state->abm_level = cur->abm_level;
6487 	state->vrr_supported = cur->vrr_supported;
6488 	state->freesync_config = cur->freesync_config;
6489 	state->cm_has_degamma = cur->cm_has_degamma;
6490 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6491 	state->force_dpms_off = cur->force_dpms_off;
6492 	/* TODO: Duplicate dc_stream once the stream object is flattened */
6493 
6494 	return &state->base;
6495 }
6496 
6497 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6498 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6499 {
6500 	crtc_debugfs_init(crtc);
6501 
6502 	return 0;
6503 }
6504 #endif
6505 
6506 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6507 {
6508 	enum dc_irq_source irq_source;
6509 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6510 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6511 	int rc;
6512 
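	/* VUPDATE interrupt sources are indexed by OTG instance. */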
6513 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6514 
6515 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6516 
6517 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6518 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6519 	return rc;
6520 }
6521 
6522 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6523 {
6524 	enum dc_irq_source irq_source;
6525 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6526 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6527 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6528 #if defined(CONFIG_DRM_AMD_DC_DCN)
6529 	struct amdgpu_display_manager *dm = &adev->dm;
6530 	struct vblank_control_work *work;
6531 #endif
6532 	int rc = 0;
6533 
6534 	if (enable) {
6535 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6536 		if (amdgpu_dm_vrr_active(acrtc_state))
6537 			rc = dm_set_vupdate_irq(crtc, true);
6538 	} else {
6539 		/* vblank irq off -> vupdate irq off */
6540 		rc = dm_set_vupdate_irq(crtc, false);
6541 	}
6542 
6543 	if (rc)
6544 		return rc;
6545 
6546 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6547 
6548 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6549 		return -EBUSY;
6550 
6551 	if (amdgpu_in_reset(adev))
6552 		return 0;
6553 
6554 #if defined(CONFIG_DRM_AMD_DC_DCN)
6555 	if (dm->vblank_control_workqueue) {
6556 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6557 		if (!work)
6558 			return -ENOMEM;
6559 
6560 		INIT_WORK(&work->work, vblank_control_worker);
6561 		work->dm = dm;
6562 		work->acrtc = acrtc;
6563 		work->enable = enable;
6564 
6565 		if (acrtc_state->stream) {
6566 			dc_stream_retain(acrtc_state->stream);
6567 			work->stream = acrtc_state->stream;
6568 		}
6569 
6570 		queue_work(dm->vblank_control_workqueue, &work->work);
6571 	}
6572 #endif
6573 
6574 	return 0;
6575 }
6576 
6577 static int dm_enable_vblank(struct drm_crtc *crtc)
6578 {
6579 	return dm_set_vblank(crtc, true);
6580 }
6581 
6582 static void dm_disable_vblank(struct drm_crtc *crtc)
6583 {
6584 	dm_set_vblank(crtc, false);
6585 }
6586 
6587 /* Implement only the options currently available for the driver */
6588 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6589 	.reset = dm_crtc_reset_state,
6590 	.destroy = amdgpu_dm_crtc_destroy,
6591 	.set_config = drm_atomic_helper_set_config,
6592 	.page_flip = drm_atomic_helper_page_flip,
6593 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6594 	.atomic_destroy_state = dm_crtc_destroy_state,
6595 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6596 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6597 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6598 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6599 	.enable_vblank = dm_enable_vblank,
6600 	.disable_vblank = dm_disable_vblank,
6601 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6602 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6603 	.late_register = amdgpu_dm_crtc_late_register,
6604 #endif
6605 };
6606 
6607 static enum drm_connector_status
6608 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6609 {
6610 	bool connected;
6611 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6612 
6613 	/*
6614 	 * Notes:
6615 	 * 1. This interface is NOT called in the context of the HPD irq.
6616 	 * 2. This interface *is called* in the context of a user-mode ioctl,
6617 	 *    which makes it a bad place for *any* MST-related activity.
6618 	 */
6619 
6620 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6621 	    !aconnector->fake_enable)
6622 		connected = (aconnector->dc_sink != NULL);
6623 	else
6624 		connected = (aconnector->base.force == DRM_FORCE_ON);
6625 
6626 	update_subconnector_property(aconnector);
6627 
6628 	return (connected ? connector_status_connected :
6629 			connector_status_disconnected);
6630 }
6631 
6632 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6633 					    struct drm_connector_state *connector_state,
6634 					    struct drm_property *property,
6635 					    uint64_t val)
6636 {
6637 	struct drm_device *dev = connector->dev;
6638 	struct amdgpu_device *adev = drm_to_adev(dev);
6639 	struct dm_connector_state *dm_old_state =
6640 		to_dm_connector_state(connector->state);
6641 	struct dm_connector_state *dm_new_state =
6642 		to_dm_connector_state(connector_state);
6643 
6644 	int ret = -EINVAL;
6645 
6646 	if (property == dev->mode_config.scaling_mode_property) {
6647 		enum amdgpu_rmx_type rmx_type;
6648 
6649 		switch (val) {
6650 		case DRM_MODE_SCALE_CENTER:
6651 			rmx_type = RMX_CENTER;
6652 			break;
6653 		case DRM_MODE_SCALE_ASPECT:
6654 			rmx_type = RMX_ASPECT;
6655 			break;
6656 		case DRM_MODE_SCALE_FULLSCREEN:
6657 			rmx_type = RMX_FULL;
6658 			break;
6659 		case DRM_MODE_SCALE_NONE:
6660 		default:
6661 			rmx_type = RMX_OFF;
6662 			break;
6663 		}
6664 
6665 		if (dm_old_state->scaling == rmx_type)
6666 			return 0;
6667 
6668 		dm_new_state->scaling = rmx_type;
6669 		ret = 0;
6670 	} else if (property == adev->mode_info.underscan_hborder_property) {
6671 		dm_new_state->underscan_hborder = val;
6672 		ret = 0;
6673 	} else if (property == adev->mode_info.underscan_vborder_property) {
6674 		dm_new_state->underscan_vborder = val;
6675 		ret = 0;
6676 	} else if (property == adev->mode_info.underscan_property) {
6677 		dm_new_state->underscan_enable = val;
6678 		ret = 0;
6679 	} else if (property == adev->mode_info.abm_level_property) {
6680 		dm_new_state->abm_level = val;
6681 		ret = 0;
6682 	}
6683 
6684 	return ret;
6685 }
6686 
6687 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6688 					    const struct drm_connector_state *state,
6689 					    struct drm_property *property,
6690 					    uint64_t *val)
6691 {
6692 	struct drm_device *dev = connector->dev;
6693 	struct amdgpu_device *adev = drm_to_adev(dev);
6694 	struct dm_connector_state *dm_state =
6695 		to_dm_connector_state(state);
6696 	int ret = -EINVAL;
6697 
6698 	if (property == dev->mode_config.scaling_mode_property) {
6699 		switch (dm_state->scaling) {
6700 		case RMX_CENTER:
6701 			*val = DRM_MODE_SCALE_CENTER;
6702 			break;
6703 		case RMX_ASPECT:
6704 			*val = DRM_MODE_SCALE_ASPECT;
6705 			break;
6706 		case RMX_FULL:
6707 			*val = DRM_MODE_SCALE_FULLSCREEN;
6708 			break;
6709 		case RMX_OFF:
6710 		default:
6711 			*val = DRM_MODE_SCALE_NONE;
6712 			break;
6713 		}
6714 		ret = 0;
6715 	} else if (property == adev->mode_info.underscan_hborder_property) {
6716 		*val = dm_state->underscan_hborder;
6717 		ret = 0;
6718 	} else if (property == adev->mode_info.underscan_vborder_property) {
6719 		*val = dm_state->underscan_vborder;
6720 		ret = 0;
6721 	} else if (property == adev->mode_info.underscan_property) {
6722 		*val = dm_state->underscan_enable;
6723 		ret = 0;
6724 	} else if (property == adev->mode_info.abm_level_property) {
6725 		*val = dm_state->abm_level;
6726 		ret = 0;
6727 	}
6728 
6729 	return ret;
6730 }
6731 
6732 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6733 {
6734 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6735 
6736 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6737 }
6738 
6739 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6740 {
6741 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6742 	const struct dc_link *link = aconnector->dc_link;
6743 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6744 	struct amdgpu_display_manager *dm = &adev->dm;
6745 	int i;
6746 
6747 	/*
6748 	 * Call only if mst_mgr was initialized before, since it's not done
6749 	 * for all connector types.
6750 	 */
6751 	if (aconnector->mst_mgr.dev)
6752 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6753 
6754 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6755 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6756 	for (i = 0; i < dm->num_of_edps; i++) {
6757 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6758 			backlight_device_unregister(dm->backlight_dev[i]);
6759 			dm->backlight_dev[i] = NULL;
6760 		}
6761 	}
6762 #endif
6763 
6764 	if (aconnector->dc_em_sink)
6765 		dc_sink_release(aconnector->dc_em_sink);
6766 	aconnector->dc_em_sink = NULL;
6767 	if (aconnector->dc_sink)
6768 		dc_sink_release(aconnector->dc_sink);
6769 	aconnector->dc_sink = NULL;
6770 
6771 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6772 	drm_connector_unregister(connector);
6773 	drm_connector_cleanup(connector);
6774 	if (aconnector->i2c) {
6775 		i2c_del_adapter(&aconnector->i2c->base);
6776 		kfree(aconnector->i2c);
6777 	}
6778 	kfree(aconnector->dm_dp_aux.aux.name);
6779 
6780 	kfree(connector);
6781 }
6782 
6783 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6784 {
6785 	struct dm_connector_state *state =
6786 		to_dm_connector_state(connector->state);
6787 
6788 	if (connector->state)
6789 		__drm_atomic_helper_connector_destroy_state(connector->state);
6790 
6791 	kfree(state);
6792 
6793 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6794 
6795 	if (state) {
6796 		state->scaling = RMX_OFF;
6797 		state->underscan_enable = false;
6798 		state->underscan_hborder = 0;
6799 		state->underscan_vborder = 0;
6800 		state->base.max_requested_bpc = 8;
6801 		state->vcpi_slots = 0;
6802 		state->pbn = 0;
6803 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6804 			state->abm_level = amdgpu_dm_abm_level;
6805 
6806 		__drm_atomic_helper_connector_reset(connector, &state->base);
6807 	}
6808 }
6809 
6810 struct drm_connector_state *
6811 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6812 {
6813 	struct dm_connector_state *state =
6814 		to_dm_connector_state(connector->state);
6815 
6816 	struct dm_connector_state *new_state =
6817 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6818 
6819 	if (!new_state)
6820 		return NULL;
6821 
6822 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6823 
6824 	new_state->freesync_capable = state->freesync_capable;
6825 	new_state->abm_level = state->abm_level;
6826 	new_state->scaling = state->scaling;
6827 	new_state->underscan_enable = state->underscan_enable;
6828 	new_state->underscan_hborder = state->underscan_hborder;
6829 	new_state->underscan_vborder = state->underscan_vborder;
6830 	new_state->vcpi_slots = state->vcpi_slots;
6831 	new_state->pbn = state->pbn;
6832 	return &new_state->base;
6833 }
6834 
6835 static int
6836 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6837 {
6838 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6839 		to_amdgpu_dm_connector(connector);
6840 	int r;
6841 
6842 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6843 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6844 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6845 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6846 		if (r)
6847 			return r;
6848 	}
6849 
6850 #if defined(CONFIG_DEBUG_FS)
6851 	connector_debugfs_init(amdgpu_dm_connector);
6852 #endif
6853 
6854 	return 0;
6855 }
6856 
6857 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6858 	.reset = amdgpu_dm_connector_funcs_reset,
6859 	.detect = amdgpu_dm_connector_detect,
6860 	.fill_modes = drm_helper_probe_single_connector_modes,
6861 	.destroy = amdgpu_dm_connector_destroy,
6862 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6863 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6864 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6865 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6866 	.late_register = amdgpu_dm_connector_late_register,
6867 	.early_unregister = amdgpu_dm_connector_unregister
6868 };
6869 
6870 static int get_modes(struct drm_connector *connector)
6871 {
6872 	return amdgpu_dm_connector_get_modes(connector);
6873 }
6874 
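/*
 * Create an emulated (virtual) sink from the connector's override EDID so a
 * forced-on connector can be driven without a physically attached display.
 * If no EDID blob is present, the connector is forced OFF instead.
 */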
6875 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6876 {
6877 	struct dc_sink_init_data init_params = {
6878 			.link = aconnector->dc_link,
6879 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6880 	};
6881 	struct edid *edid;
6882 
6883 	if (!aconnector->base.edid_blob_ptr) {
6884 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6885 				aconnector->base.name);
6886 
6887 		aconnector->base.force = DRM_FORCE_OFF;
6888 		aconnector->base.override_edid = false;
6889 		return;
6890 	}
6891 
6892 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6893 
6894 	aconnector->edid = edid;
6895 
6896 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6897 		aconnector->dc_link,
6898 		(uint8_t *)edid,
6899 		(edid->extensions + 1) * EDID_LENGTH,
6900 		&init_params);
6901 
6902 	if (aconnector->base.force == DRM_FORCE_ON) {
6903 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6904 		aconnector->dc_link->local_sink :
6905 		aconnector->dc_em_sink;
6906 		dc_sink_retain(aconnector->dc_sink);
6907 	}
6908 }
6909 
6910 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6911 {
6912 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6913 
6914 	/*
6915 	 * In case of a headless boot with force-on for a DP-managed connector,
6916 	 * these settings have to be != 0 to get an initial modeset.
6917 	 */
6918 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6919 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6920 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6921 	}
6922 
6923 
6924 	aconnector->base.override_edid = true;
6925 	create_eml_sink(aconnector);
6926 }
6927 
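/*
 * Create a dc_stream_state for the sink and validate it with DC. On
 * validation failure the colour depth is lowered step by step (down to
 * 6 bpc) and, if the encoder still rejects the stream, a final retry is
 * made with YCbCr420 output forced.
 */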
6928 static struct dc_stream_state *
6929 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6930 				const struct drm_display_mode *drm_mode,
6931 				const struct dm_connector_state *dm_state,
6932 				const struct dc_stream_state *old_stream)
6933 {
6934 	struct drm_connector *connector = &aconnector->base;
6935 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6936 	struct dc_stream_state *stream;
6937 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6938 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6939 	enum dc_status dc_result = DC_OK;
6940 
6941 	do {
6942 		stream = create_stream_for_sink(aconnector, drm_mode,
6943 						dm_state, old_stream,
6944 						requested_bpc);
6945 		if (stream == NULL) {
6946 			DRM_ERROR("Failed to create stream for sink!\n");
6947 			break;
6948 		}
6949 
6950 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6951 
6952 		if (dc_result != DC_OK) {
6953 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6954 				      drm_mode->hdisplay,
6955 				      drm_mode->vdisplay,
6956 				      drm_mode->clock,
6957 				      dc_result,
6958 				      dc_status_to_str(dc_result));
6959 
6960 			dc_stream_release(stream);
6961 			stream = NULL;
6962 			requested_bpc -= 2; /* lower bpc to retry validation */
6963 		}
6964 
6965 	} while (stream == NULL && requested_bpc >= 6);
6966 
6967 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6968 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6969 
6970 		aconnector->force_yuv420_output = true;
6971 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6972 						dm_state, old_stream);
6973 		aconnector->force_yuv420_output = false;
6974 	}
6975 
6976 	return stream;
6977 }
6978 
6979 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6980 				   struct drm_display_mode *mode)
6981 {
6982 	int result = MODE_ERROR;
6983 	struct dc_sink *dc_sink;
6984 	/* TODO: Unhardcode stream count */
6985 	struct dc_stream_state *stream;
6986 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6987 
6988 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6989 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6990 		return result;
6991 
6992 	/*
6993 	 * Only run this the first time mode_valid is called, to initialize
6994 	 * EDID management.
6995 	 */
6996 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6997 		!aconnector->dc_em_sink)
6998 		handle_edid_mgmt(aconnector);
6999 
7000 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7001 
7002 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7003 				aconnector->base.force != DRM_FORCE_ON) {
7004 		DRM_ERROR("dc_sink is NULL!\n");
7005 		goto fail;
7006 	}
7007 
7008 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7009 	if (stream) {
7010 		dc_stream_release(stream);
7011 		result = MODE_OK;
7012 	}
7013 
7014 fail:
7015 	/* TODO: error handling */
7016 	return result;
7017 }
7018 
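/*
 * Pack the connector's HDR output metadata into a DC info packet. The DRM
 * static metadata infoframe (4 byte header + 26 byte payload) is re-wrapped
 * as an HDMI infoframe or as a DP/eDP SDP depending on the connector type.
 */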
7019 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7020 				struct dc_info_packet *out)
7021 {
7022 	struct hdmi_drm_infoframe frame;
7023 	unsigned char buf[30]; /* 26 + 4 */
7024 	ssize_t len;
7025 	int ret, i;
7026 
7027 	memset(out, 0, sizeof(*out));
7028 
7029 	if (!state->hdr_output_metadata)
7030 		return 0;
7031 
7032 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7033 	if (ret)
7034 		return ret;
7035 
7036 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7037 	if (len < 0)
7038 		return (int)len;
7039 
7040 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7041 	if (len != 30)
7042 		return -EINVAL;
7043 
7044 	/* Prepare the infopacket for DC. */
7045 	switch (state->connector->connector_type) {
7046 	case DRM_MODE_CONNECTOR_HDMIA:
7047 		out->hb0 = 0x87; /* type */
7048 		out->hb1 = 0x01; /* version */
7049 		out->hb2 = 0x1A; /* length */
7050 		out->sb[0] = buf[3]; /* checksum */
7051 		i = 1;
7052 		break;
7053 
7054 	case DRM_MODE_CONNECTOR_DisplayPort:
7055 	case DRM_MODE_CONNECTOR_eDP:
7056 		out->hb0 = 0x00; /* sdp id, zero */
7057 		out->hb1 = 0x87; /* type */
7058 		out->hb2 = 0x1D; /* payload len - 1 */
7059 		out->hb3 = (0x13 << 2); /* sdp version */
7060 		out->sb[0] = 0x01; /* version */
7061 		out->sb[1] = 0x1A; /* length */
7062 		i = 2;
7063 		break;
7064 
7065 	default:
7066 		return -EINVAL;
7067 	}
7068 
7069 	memcpy(&out->sb[i], &buf[4], 26);
7070 	out->valid = true;
7071 
7072 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7073 		       sizeof(out->sb), false);
7074 
7075 	return 0;
7076 }
7077 
7078 static int
7079 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7080 				 struct drm_atomic_state *state)
7081 {
7082 	struct drm_connector_state *new_con_state =
7083 		drm_atomic_get_new_connector_state(state, conn);
7084 	struct drm_connector_state *old_con_state =
7085 		drm_atomic_get_old_connector_state(state, conn);
7086 	struct drm_crtc *crtc = new_con_state->crtc;
7087 	struct drm_crtc_state *new_crtc_state;
7088 	int ret;
7089 
7090 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7091 
7092 	if (!crtc)
7093 		return 0;
7094 
7095 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7096 		struct dc_info_packet hdr_infopacket;
7097 
7098 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7099 		if (ret)
7100 			return ret;
7101 
7102 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7103 		if (IS_ERR(new_crtc_state))
7104 			return PTR_ERR(new_crtc_state);
7105 
7106 		/*
7107 		 * DC considers the stream backends changed if the
7108 		 * static metadata changes. Forcing the modeset also
7109 		 * gives a simple way for userspace to switch from
7110 		 * 8bpc to 10bpc when setting the metadata to enter
7111 		 * or exit HDR.
7112 		 *
7113 		 * Changing the static metadata after it's been
7114 		 * set is permissible, however. So only force a
7115 		 * modeset if we're entering or exiting HDR.
7116 		 */
7117 		new_crtc_state->mode_changed =
7118 			!old_con_state->hdr_output_metadata ||
7119 			!new_con_state->hdr_output_metadata;
7120 	}
7121 
7122 	return 0;
7123 }
7124 
7125 static const struct drm_connector_helper_funcs
7126 amdgpu_dm_connector_helper_funcs = {
7127 	/*
7128 	 * If a second, larger display is hotplugged in fbcon mode, its higher
7129 	 * resolution modes are filtered out by drm_mode_validate_size() and are
7130 	 * missing once the user starts lightdm. We therefore need to rebuild the
7131 	 * mode list in the get_modes callback, not just return the mode count.
7132 	 */
7133 	.get_modes = get_modes,
7134 	.mode_valid = amdgpu_dm_connector_mode_valid,
7135 	.atomic_check = amdgpu_dm_connector_atomic_check,
7136 };
7137 
7138 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7139 {
7140 }
7141 
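/*
 * Count the non-cursor planes that will be enabled on the CRTC in this
 * atomic state. Planes without a new plane state keep their previously
 * validated enable status; planes with a new state only count if a
 * framebuffer is attached.
 */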
7142 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7143 {
7144 	struct drm_atomic_state *state = new_crtc_state->state;
7145 	struct drm_plane *plane;
7146 	int num_active = 0;
7147 
7148 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7149 		struct drm_plane_state *new_plane_state;
7150 
7151 		/* Cursor planes are "fake". */
7152 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7153 			continue;
7154 
7155 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7156 
7157 		if (!new_plane_state) {
7158 			/*
7159 			 * The plane is enabled on the CRTC and hasn't changed
7160 			 * state. This means that it previously passed
7161 			 * validation and is therefore enabled.
7162 			 */
7163 			num_active += 1;
7164 			continue;
7165 		}
7166 
7167 		/* We need a framebuffer to be considered enabled. */
7168 		num_active += (new_plane_state->fb != NULL);
7169 	}
7170 
7171 	return num_active;
7172 }
7173 
7174 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7175 					 struct drm_crtc_state *new_crtc_state)
7176 {
7177 	struct dm_crtc_state *dm_new_crtc_state =
7178 		to_dm_crtc_state(new_crtc_state);
7179 
7180 	dm_new_crtc_state->active_planes = 0;
7181 
7182 	if (!dm_new_crtc_state->stream)
7183 		return;
7184 
7185 	dm_new_crtc_state->active_planes =
7186 		count_crtc_active_planes(new_crtc_state);
7187 }
7188 
7189 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7190 				       struct drm_atomic_state *state)
7191 {
7192 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7193 									  crtc);
7194 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7195 	struct dc *dc = adev->dm.dc;
7196 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7197 	int ret = -EINVAL;
7198 
7199 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7200 
7201 	dm_update_crtc_active_planes(crtc, crtc_state);
7202 
7203 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7204 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7205 		return ret;
7206 	}
7207 
7208 	/*
7209 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7210 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7211 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7212 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7213 	 */
7214 	if (crtc_state->enable &&
7215 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7216 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7217 		return -EINVAL;
7218 	}
7219 
7220 	/* In some use cases, like reset, no stream is attached */
7221 	if (!dm_crtc_state->stream)
7222 		return 0;
7223 
7224 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7225 		return 0;
7226 
7227 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7228 	return ret;
7229 }
7230 
7231 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7232 				      const struct drm_display_mode *mode,
7233 				      struct drm_display_mode *adjusted_mode)
7234 {
7235 	return true;
7236 }
7237 
7238 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7239 	.disable = dm_crtc_helper_disable,
7240 	.atomic_check = dm_crtc_helper_atomic_check,
7241 	.mode_fixup = dm_crtc_helper_mode_fixup,
7242 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7243 };
7244 
7245 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7246 {
7247 
7248 }
7249 
7250 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7251 {
7252 	switch (display_color_depth) {
7253 	case COLOR_DEPTH_666:
7254 		return 6;
7255 	case COLOR_DEPTH_888:
7256 		return 8;
7257 	case COLOR_DEPTH_101010:
7258 		return 10;
7259 	case COLOR_DEPTH_121212:
7260 		return 12;
7261 	case COLOR_DEPTH_141414:
7262 		return 14;
7263 	case COLOR_DEPTH_161616:
7264 		return 16;
7265 	default:
7266 		break;
7267 	}
7268 	return 0;
7269 }
7270 
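/*
 * For MST connectors, compute the stream bandwidth (PBN) from the adjusted
 * mode and the negotiated colour depth, then reserve the corresponding VCPI
 * time slots in the MST atomic state.
 */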
7271 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7272 					  struct drm_crtc_state *crtc_state,
7273 					  struct drm_connector_state *conn_state)
7274 {
7275 	struct drm_atomic_state *state = crtc_state->state;
7276 	struct drm_connector *connector = conn_state->connector;
7277 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7278 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7279 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7280 	struct drm_dp_mst_topology_mgr *mst_mgr;
7281 	struct drm_dp_mst_port *mst_port;
7282 	enum dc_color_depth color_depth;
7283 	int clock, bpp = 0;
7284 	bool is_y420 = false;
7285 
7286 	if (!aconnector->port || !aconnector->dc_sink)
7287 		return 0;
7288 
7289 	mst_port = aconnector->port;
7290 	mst_mgr = &aconnector->mst_port->mst_mgr;
7291 
7292 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7293 		return 0;
7294 
7295 	if (!state->duplicated) {
7296 		int max_bpc = conn_state->max_requested_bpc;
7297 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7298 				aconnector->force_yuv420_output;
7299 		color_depth = convert_color_depth_from_display_info(connector,
7300 								    is_y420,
7301 								    max_bpc);
7302 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7303 		clock = adjusted_mode->clock;
7304 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7305 	}
7306 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7307 									   mst_mgr,
7308 									   mst_port,
7309 									   dm_new_connector_state->pbn,
7310 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7311 	if (dm_new_connector_state->vcpi_slots < 0) {
7312 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7313 		return dm_new_connector_state->vcpi_slots;
7314 	}
7315 	return 0;
7316 }
7317 
7318 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7319 	.disable = dm_encoder_helper_disable,
7320 	.atomic_check = dm_encoder_helper_atomic_check
7321 };
7322 
7323 #if defined(CONFIG_DRM_AMD_DC_DCN)
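/*
 * For every MST connector with a stream in the DC state, apply the PBN/VCPI
 * slot allocation computed by compute_mst_dsc_configs_for_state(), enabling
 * DSC on the MST port only for streams that actually use it.
 */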
7324 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7325 					    struct dc_state *dc_state,
7326 					    struct dsc_mst_fairness_vars *vars)
7327 {
7328 	struct dc_stream_state *stream = NULL;
7329 	struct drm_connector *connector;
7330 	struct drm_connector_state *new_con_state;
7331 	struct amdgpu_dm_connector *aconnector;
7332 	struct dm_connector_state *dm_conn_state;
7333 	int i, j;
7334 	int vcpi, pbn_div, pbn, slot_num = 0;
7335 
7336 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7337 
7338 		aconnector = to_amdgpu_dm_connector(connector);
7339 
7340 		if (!aconnector->port)
7341 			continue;
7342 
7343 		if (!new_con_state || !new_con_state->crtc)
7344 			continue;
7345 
7346 		dm_conn_state = to_dm_connector_state(new_con_state);
7347 
7348 		for (j = 0; j < dc_state->stream_count; j++) {
7349 			stream = dc_state->streams[j];
7350 			if (!stream)
7351 				continue;
7352 
7353 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7354 				break;
7355 
7356 			stream = NULL;
7357 		}
7358 
7359 		if (!stream)
7360 			continue;
7361 
7362 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7363 		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7364 		for (j = 0; j < dc_state->stream_count; j++) {
7365 			if (vars[j].aconnector == aconnector) {
7366 				pbn = vars[j].pbn;
7367 				break;
7368 			}
7369 		}
7370 
7371 		if (j == dc_state->stream_count)
7372 			continue;
7373 
7374 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7375 
7376 		if (stream->timing.flags.DSC != 1) {
7377 			dm_conn_state->pbn = pbn;
7378 			dm_conn_state->vcpi_slots = slot_num;
7379 
7380 			drm_dp_mst_atomic_enable_dsc(state,
7381 						     aconnector->port,
7382 						     dm_conn_state->pbn,
7383 						     0,
7384 						     false);
7385 			continue;
7386 		}
7387 
7388 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7389 						    aconnector->port,
7390 						    pbn, pbn_div,
7391 						    true);
7392 		if (vcpi < 0)
7393 			return vcpi;
7394 
7395 		dm_conn_state->pbn = pbn;
7396 		dm_conn_state->vcpi_slots = vcpi;
7397 	}
7398 	return 0;
7399 }
7400 #endif
7401 
7402 static void dm_drm_plane_reset(struct drm_plane *plane)
7403 {
7404 	struct dm_plane_state *amdgpu_state = NULL;
7405 
7406 	if (plane->state)
7407 		plane->funcs->atomic_destroy_state(plane, plane->state);
7408 
7409 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7410 	WARN_ON(amdgpu_state == NULL);
7411 
7412 	if (amdgpu_state)
7413 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7414 }
7415 
7416 static struct drm_plane_state *
7417 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7418 {
7419 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7420 
7421 	old_dm_plane_state = to_dm_plane_state(plane->state);
7422 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7423 	if (!dm_plane_state)
7424 		return NULL;
7425 
7426 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7427 
7428 	if (old_dm_plane_state->dc_state) {
7429 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7430 		dc_plane_state_retain(dm_plane_state->dc_state);
7431 	}
7432 
7433 	return &dm_plane_state->base;
7434 }
7435 
7436 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7437 				struct drm_plane_state *state)
7438 {
7439 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7440 
7441 	if (dm_plane_state->dc_state)
7442 		dc_plane_state_release(dm_plane_state->dc_state);
7443 
7444 	drm_atomic_helper_plane_destroy_state(plane, state);
7445 }
7446 
7447 static const struct drm_plane_funcs dm_plane_funcs = {
7448 	.update_plane	= drm_atomic_helper_update_plane,
7449 	.disable_plane	= drm_atomic_helper_disable_plane,
7450 	.destroy	= drm_primary_helper_destroy,
7451 	.reset = dm_drm_plane_reset,
7452 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7453 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7454 	.format_mod_supported = dm_plane_format_mod_supported,
7455 };
7456 
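/*
 * Pin the framebuffer BO in a displayable domain and bind it into GART so
 * its GPU address is known, then fill the DC plane buffer attributes from
 * that address, but only for newly created planes that DC isn't using yet.
 */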
7457 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7458 				      struct drm_plane_state *new_state)
7459 {
7460 	struct amdgpu_framebuffer *afb;
7461 	struct drm_gem_object *obj;
7462 	struct amdgpu_device *adev;
7463 	struct amdgpu_bo *rbo;
7464 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7465 	struct list_head list;
7466 	struct ttm_validate_buffer tv;
7467 	struct ww_acquire_ctx ticket;
7468 	uint32_t domain;
7469 	int r;
7470 
7471 	if (!new_state->fb) {
7472 		DRM_DEBUG_KMS("No FB bound\n");
7473 		return 0;
7474 	}
7475 
7476 	afb = to_amdgpu_framebuffer(new_state->fb);
7477 	obj = new_state->fb->obj[0];
7478 	rbo = gem_to_amdgpu_bo(obj);
7479 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7480 	INIT_LIST_HEAD(&list);
7481 
7482 	tv.bo = &rbo->tbo;
7483 	tv.num_shared = 1;
7484 	list_add(&tv.head, &list);
7485 
7486 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7487 	if (r) {
7488 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7489 		return r;
7490 	}
7491 
7492 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7493 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7494 	else
7495 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7496 
7497 	r = amdgpu_bo_pin(rbo, domain);
7498 	if (unlikely(r != 0)) {
7499 		if (r != -ERESTARTSYS)
7500 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7501 		ttm_eu_backoff_reservation(&ticket, &list);
7502 		return r;
7503 	}
7504 
7505 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7506 	if (unlikely(r != 0)) {
7507 		amdgpu_bo_unpin(rbo);
7508 		ttm_eu_backoff_reservation(&ticket, &list);
7509 		DRM_ERROR("%p bind failed\n", rbo);
7510 		return r;
7511 	}
7512 
7513 	ttm_eu_backoff_reservation(&ticket, &list);
7514 
7515 	afb->address = amdgpu_bo_gpu_offset(rbo);
7516 
7517 	amdgpu_bo_ref(rbo);
7518 
7519 	/*
7520 	 * We don't do surface updates on planes that have been newly created,
7521 	 * but we also don't have the afb->address during atomic check.
7522 	 *
7523 	 * Fill in buffer attributes depending on the address here, but only on
7524 	 * newly created planes since they're not being used by DC yet and this
7525 	 * won't modify global state.
7526 	 */
7527 	dm_plane_state_old = to_dm_plane_state(plane->state);
7528 	dm_plane_state_new = to_dm_plane_state(new_state);
7529 
7530 	if (dm_plane_state_new->dc_state &&
7531 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7532 		struct dc_plane_state *plane_state =
7533 			dm_plane_state_new->dc_state;
7534 		bool force_disable_dcc = !plane_state->dcc.enable;
7535 
7536 		fill_plane_buffer_attributes(
7537 			adev, afb, plane_state->format, plane_state->rotation,
7538 			afb->tiling_flags,
7539 			&plane_state->tiling_info, &plane_state->plane_size,
7540 			&plane_state->dcc, &plane_state->address,
7541 			afb->tmz_surface, force_disable_dcc);
7542 	}
7543 
7544 	return 0;
7545 }
7546 
7547 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7548 				       struct drm_plane_state *old_state)
7549 {
7550 	struct amdgpu_bo *rbo;
7551 	int r;
7552 
7553 	if (!old_state->fb)
7554 		return;
7555 
7556 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7557 	r = amdgpu_bo_reserve(rbo, false);
7558 	if (unlikely(r)) {
7559 		DRM_ERROR("failed to reserve rbo before unpin\n");
7560 		return;
7561 	}
7562 
7563 	amdgpu_bo_unpin(rbo);
7564 	amdgpu_bo_unreserve(rbo);
7565 	amdgpu_bo_unref(&rbo);
7566 }
7567 
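/*
 * Validate the plane state against its CRTC: reject non-cursor planes whose
 * visible viewport is degenerate or below the hardware minimum, and convert
 * the DC scaling limits from the plane caps into the 16.16 fixed-point
 * min/max factors expected by drm_atomic_helper_check_plane_state().
 */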
7568 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7569 				       struct drm_crtc_state *new_crtc_state)
7570 {
7571 	struct drm_framebuffer *fb = state->fb;
7572 	int min_downscale, max_upscale;
7573 	int min_scale = 0;
7574 	int max_scale = INT_MAX;
7575 
7576 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7577 	if (fb && state->crtc) {
7578 		/* Validate viewport to cover the case when only the position changes */
7579 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7580 			int viewport_width = state->crtc_w;
7581 			int viewport_height = state->crtc_h;
7582 
7583 			if (state->crtc_x < 0)
7584 				viewport_width += state->crtc_x;
7585 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7586 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7587 
7588 			if (state->crtc_y < 0)
7589 				viewport_height += state->crtc_y;
7590 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7591 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7592 
7593 			if (viewport_width < 0 || viewport_height < 0) {
7594 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7595 				return -EINVAL;
7596 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7597 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7598 				return -EINVAL;
7599 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7600 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7601 				return -EINVAL;
7602 			}
7603 
7604 		}
7605 
7606 		/* Get min/max allowed scaling factors from plane caps. */
7607 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7608 					     &min_downscale, &max_upscale);
7609 		/*
7610 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7611 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7612 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7613 		 */
7614 		min_scale = (1000 << 16) / max_upscale;
7615 		max_scale = (1000 << 16) / min_downscale;
7616 	}
7617 
7618 	return drm_atomic_helper_check_plane_state(
7619 		state, new_crtc_state, min_scale, max_scale, true, true);
7620 }
7621 
7622 static int dm_plane_atomic_check(struct drm_plane *plane,
7623 				 struct drm_atomic_state *state)
7624 {
7625 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7626 										 plane);
7627 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7628 	struct dc *dc = adev->dm.dc;
7629 	struct dm_plane_state *dm_plane_state;
7630 	struct dc_scaling_info scaling_info;
7631 	struct drm_crtc_state *new_crtc_state;
7632 	int ret;
7633 
7634 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7635 
7636 	dm_plane_state = to_dm_plane_state(new_plane_state);
7637 
7638 	if (!dm_plane_state->dc_state)
7639 		return 0;
7640 
7641 	new_crtc_state =
7642 		drm_atomic_get_new_crtc_state(state,
7643 					      new_plane_state->crtc);
7644 	if (!new_crtc_state)
7645 		return -EINVAL;
7646 
7647 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7648 	if (ret)
7649 		return ret;
7650 
7651 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7652 	if (ret)
7653 		return ret;
7654 
7655 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7656 		return 0;
7657 
7658 	return -EINVAL;
7659 }
7660 
7661 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7662 				       struct drm_atomic_state *state)
7663 {
7664 	/* Only support async updates on cursor planes. */
7665 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7666 		return -EINVAL;
7667 
7668 	return 0;
7669 }
7670 
7671 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7672 					 struct drm_atomic_state *state)
7673 {
7674 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7675 									   plane);
7676 	struct drm_plane_state *old_state =
7677 		drm_atomic_get_old_plane_state(state, plane);
7678 
7679 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7680 
7681 	swap(plane->state->fb, new_state->fb);
7682 
7683 	plane->state->src_x = new_state->src_x;
7684 	plane->state->src_y = new_state->src_y;
7685 	plane->state->src_w = new_state->src_w;
7686 	plane->state->src_h = new_state->src_h;
7687 	plane->state->crtc_x = new_state->crtc_x;
7688 	plane->state->crtc_y = new_state->crtc_y;
7689 	plane->state->crtc_w = new_state->crtc_w;
7690 	plane->state->crtc_h = new_state->crtc_h;
7691 
7692 	handle_cursor_update(plane, old_state);
7693 }
7694 
7695 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7696 	.prepare_fb = dm_plane_helper_prepare_fb,
7697 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7698 	.atomic_check = dm_plane_atomic_check,
7699 	.atomic_async_check = dm_plane_atomic_async_check,
7700 	.atomic_async_update = dm_plane_atomic_async_update
7701 };
7702 
7703 /*
7704  * TODO: these are currently initialized to rgb formats only.
7705  * For future use cases we should either initialize them dynamically based on
7706  * plane capabilities, or initialize this array to all formats, so internal drm
7707  * check will succeed, and let DC implement the proper checks.
7708  */
7709 static const uint32_t rgb_formats[] = {
7710 	DRM_FORMAT_XRGB8888,
7711 	DRM_FORMAT_ARGB8888,
7712 	DRM_FORMAT_RGBA8888,
7713 	DRM_FORMAT_XRGB2101010,
7714 	DRM_FORMAT_XBGR2101010,
7715 	DRM_FORMAT_ARGB2101010,
7716 	DRM_FORMAT_ABGR2101010,
7717 	DRM_FORMAT_XRGB16161616,
7718 	DRM_FORMAT_XBGR16161616,
7719 	DRM_FORMAT_ARGB16161616,
7720 	DRM_FORMAT_ABGR16161616,
7721 	DRM_FORMAT_XBGR8888,
7722 	DRM_FORMAT_ABGR8888,
7723 	DRM_FORMAT_RGB565,
7724 };
7725 
7726 static const uint32_t overlay_formats[] = {
7727 	DRM_FORMAT_XRGB8888,
7728 	DRM_FORMAT_ARGB8888,
7729 	DRM_FORMAT_RGBA8888,
7730 	DRM_FORMAT_XBGR8888,
7731 	DRM_FORMAT_ABGR8888,
7732 	DRM_FORMAT_RGB565
7733 };
7734 
7735 static const u32 cursor_formats[] = {
7736 	DRM_FORMAT_ARGB8888
7737 };
7738 
7739 static int get_plane_formats(const struct drm_plane *plane,
7740 			     const struct dc_plane_cap *plane_cap,
7741 			     uint32_t *formats, int max_formats)
7742 {
7743 	int i, num_formats = 0;
7744 
7745 	/*
7746 	 * TODO: Query support for each group of formats directly from
7747 	 * DC plane caps. This will require adding more formats to the
7748 	 * caps list.
7749 	 */
7750 
7751 	switch (plane->type) {
7752 	case DRM_PLANE_TYPE_PRIMARY:
7753 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7754 			if (num_formats >= max_formats)
7755 				break;
7756 
7757 			formats[num_formats++] = rgb_formats[i];
7758 		}
7759 
7760 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7761 			formats[num_formats++] = DRM_FORMAT_NV12;
7762 		if (plane_cap && plane_cap->pixel_format_support.p010)
7763 			formats[num_formats++] = DRM_FORMAT_P010;
7764 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7765 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7766 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7767 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7768 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7769 		}
7770 		break;
7771 
7772 	case DRM_PLANE_TYPE_OVERLAY:
7773 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7774 			if (num_formats >= max_formats)
7775 				break;
7776 
7777 			formats[num_formats++] = overlay_formats[i];
7778 		}
7779 		break;
7780 
7781 	case DRM_PLANE_TYPE_CURSOR:
7782 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7783 			if (num_formats >= max_formats)
7784 				break;
7785 
7786 			formats[num_formats++] = cursor_formats[i];
7787 		}
7788 		break;
7789 	}
7790 
7791 	return num_formats;
7792 }
7793 
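/*
 * Initialize a DRM plane from the DC plane caps: build the supported format
 * and modifier lists, and attach blending, colour encoding/range and
 * rotation properties where the caps allow them.
 */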
7794 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7795 				struct drm_plane *plane,
7796 				unsigned long possible_crtcs,
7797 				const struct dc_plane_cap *plane_cap)
7798 {
7799 	uint32_t formats[32];
7800 	int num_formats;
7801 	int res = -EPERM;
7802 	unsigned int supported_rotations;
7803 	uint64_t *modifiers = NULL;
7804 
7805 	num_formats = get_plane_formats(plane, plane_cap, formats,
7806 					ARRAY_SIZE(formats));
7807 
7808 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7809 	if (res)
7810 		return res;
7811 
7812 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7813 				       &dm_plane_funcs, formats, num_formats,
7814 				       modifiers, plane->type, NULL);
7815 	kfree(modifiers);
7816 	if (res)
7817 		return res;
7818 
7819 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7820 	    plane_cap && plane_cap->per_pixel_alpha) {
7821 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7822 					  BIT(DRM_MODE_BLEND_PREMULTI);
7823 
7824 		drm_plane_create_alpha_property(plane);
7825 		drm_plane_create_blend_mode_property(plane, blend_caps);
7826 	}
7827 
7828 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7829 	    plane_cap &&
7830 	    (plane_cap->pixel_format_support.nv12 ||
7831 	     plane_cap->pixel_format_support.p010)) {
7832 		/* This only affects YUV formats. */
7833 		drm_plane_create_color_properties(
7834 			plane,
7835 			BIT(DRM_COLOR_YCBCR_BT601) |
7836 			BIT(DRM_COLOR_YCBCR_BT709) |
7837 			BIT(DRM_COLOR_YCBCR_BT2020),
7838 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7839 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7840 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7841 	}
7842 
7843 	supported_rotations =
7844 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7845 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7846 
7847 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7848 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7849 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7850 						   supported_rotations);
7851 
7852 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7853 
7854 	/* Create (reset) the plane state */
7855 	if (plane->funcs->reset)
7856 		plane->funcs->reset(plane);
7857 
7858 	return 0;
7859 }
7860 
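/*
 * Create a CRTC together with its dedicated cursor plane, hook up the DM
 * CRTC funcs and helpers, and initialize colour management and cursor size
 * limits from the DC caps.
 */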
7861 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7862 			       struct drm_plane *plane,
7863 			       uint32_t crtc_index)
7864 {
7865 	struct amdgpu_crtc *acrtc = NULL;
7866 	struct drm_plane *cursor_plane;
7867 
7868 	int res = -ENOMEM;
7869 
7870 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7871 	if (!cursor_plane)
7872 		goto fail;
7873 
7874 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7875 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7876 
7877 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7878 	if (!acrtc)
7879 		goto fail;
7880 
7881 	res = drm_crtc_init_with_planes(
7882 			dm->ddev,
7883 			&acrtc->base,
7884 			plane,
7885 			cursor_plane,
7886 			&amdgpu_dm_crtc_funcs, NULL);
7887 
7888 	if (res)
7889 		goto fail;
7890 
7891 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7892 
7893 	/* Create (reset) the CRTC state */
7894 	if (acrtc->base.funcs->reset)
7895 		acrtc->base.funcs->reset(&acrtc->base);
7896 
7897 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7898 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7899 
7900 	acrtc->crtc_id = crtc_index;
7901 	acrtc->base.enabled = false;
7902 	acrtc->otg_inst = -1;
7903 
7904 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7905 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7906 				   true, MAX_COLOR_LUT_ENTRIES);
7907 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7908 
7909 	return 0;
7910 
7911 fail:
7912 	kfree(acrtc);
7913 	kfree(cursor_plane);
7914 	return res;
7915 }
7916 
7917 
7918 static int to_drm_connector_type(enum signal_type st)
7919 {
7920 	switch (st) {
7921 	case SIGNAL_TYPE_HDMI_TYPE_A:
7922 		return DRM_MODE_CONNECTOR_HDMIA;
7923 	case SIGNAL_TYPE_EDP:
7924 		return DRM_MODE_CONNECTOR_eDP;
7925 	case SIGNAL_TYPE_LVDS:
7926 		return DRM_MODE_CONNECTOR_LVDS;
7927 	case SIGNAL_TYPE_RGB:
7928 		return DRM_MODE_CONNECTOR_VGA;
7929 	case SIGNAL_TYPE_DISPLAY_PORT:
7930 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7931 		return DRM_MODE_CONNECTOR_DisplayPort;
7932 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7933 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7934 		return DRM_MODE_CONNECTOR_DVID;
7935 	case SIGNAL_TYPE_VIRTUAL:
7936 		return DRM_MODE_CONNECTOR_VIRTUAL;
7937 
7938 	default:
7939 		return DRM_MODE_CONNECTOR_Unknown;
7940 	}
7941 }
7942 
7943 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7944 {
7945 	struct drm_encoder *encoder;
7946 
7947 	/* There is only one encoder per connector */
7948 	drm_connector_for_each_possible_encoder(connector, encoder)
7949 		return encoder;
7950 
7951 	return NULL;
7952 }
7953 
7954 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7955 {
7956 	struct drm_encoder *encoder;
7957 	struct amdgpu_encoder *amdgpu_encoder;
7958 
7959 	encoder = amdgpu_dm_connector_to_encoder(connector);
7960 
7961 	if (encoder == NULL)
7962 		return;
7963 
7964 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7965 
7966 	amdgpu_encoder->native_mode.clock = 0;
7967 
7968 	if (!list_empty(&connector->probed_modes)) {
7969 		struct drm_display_mode *preferred_mode = NULL;
7970 
7971 		list_for_each_entry(preferred_mode,
7972 				    &connector->probed_modes,
7973 				    head) {
7974 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7975 				amdgpu_encoder->native_mode = *preferred_mode;
7976 
7977 			break;
7978 		}
7979 
7980 	}
7981 }
7982 
7983 static struct drm_display_mode *
7984 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7985 			     char *name,
7986 			     int hdisplay, int vdisplay)
7987 {
7988 	struct drm_device *dev = encoder->dev;
7989 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7990 	struct drm_display_mode *mode = NULL;
7991 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7992 
7993 	mode = drm_mode_duplicate(dev, native_mode);
7994 
7995 	if (mode == NULL)
7996 		return NULL;
7997 
7998 	mode->hdisplay = hdisplay;
7999 	mode->vdisplay = vdisplay;
8000 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8001 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8002 
8003 	return mode;
8004 
8005 }
8006 
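/*
 * Add a set of common lower resolutions (640x480 up to 1920x1200) derived
 * from the encoder's native mode, skipping sizes that exceed the native
 * resolution or already exist in the probed mode list.
 */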
8007 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8008 						 struct drm_connector *connector)
8009 {
8010 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8011 	struct drm_display_mode *mode = NULL;
8012 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8013 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8014 				to_amdgpu_dm_connector(connector);
8015 	int i;
8016 	int n;
8017 	struct mode_size {
8018 		char name[DRM_DISPLAY_MODE_LEN];
8019 		int w;
8020 		int h;
8021 	} common_modes[] = {
8022 		{  "640x480",  640,  480},
8023 		{  "800x600",  800,  600},
8024 		{ "1024x768", 1024,  768},
8025 		{ "1280x720", 1280,  720},
8026 		{ "1280x800", 1280,  800},
8027 		{"1280x1024", 1280, 1024},
8028 		{ "1440x900", 1440,  900},
8029 		{"1680x1050", 1680, 1050},
8030 		{"1600x1200", 1600, 1200},
8031 		{"1920x1080", 1920, 1080},
8032 		{"1920x1200", 1920, 1200}
8033 	};
8034 
8035 	n = ARRAY_SIZE(common_modes);
8036 
8037 	for (i = 0; i < n; i++) {
8038 		struct drm_display_mode *curmode = NULL;
8039 		bool mode_existed = false;
8040 
8041 		if (common_modes[i].w > native_mode->hdisplay ||
8042 		    common_modes[i].h > native_mode->vdisplay ||
8043 		   (common_modes[i].w == native_mode->hdisplay &&
8044 		    common_modes[i].h == native_mode->vdisplay))
8045 			continue;
8046 
8047 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8048 			if (common_modes[i].w == curmode->hdisplay &&
8049 			    common_modes[i].h == curmode->vdisplay) {
8050 				mode_existed = true;
8051 				break;
8052 			}
8053 		}
8054 
8055 		if (mode_existed)
8056 			continue;
8057 
8058 		mode = amdgpu_dm_create_common_mode(encoder,
8059 				common_modes[i].name, common_modes[i].w,
8060 				common_modes[i].h);
8061 		drm_mode_probed_add(connector, mode);
8062 		amdgpu_dm_connector->num_modes++;
8063 	}
8064 }
8065 
8066 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8067 {
8068 	struct drm_encoder *encoder;
8069 	struct amdgpu_encoder *amdgpu_encoder;
8070 	const struct drm_display_mode *native_mode;
8071 
8072 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8073 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8074 		return;
8075 
8076 	encoder = amdgpu_dm_connector_to_encoder(connector);
8077 	if (!encoder)
8078 		return;
8079 
8080 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8081 
8082 	native_mode = &amdgpu_encoder->native_mode;
8083 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8084 		return;
8085 
8086 	drm_connector_set_panel_orientation_with_quirk(connector,
8087 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8088 						       native_mode->hdisplay,
8089 						       native_mode->vdisplay);
8090 }
8091 
8092 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8093 					      struct edid *edid)
8094 {
8095 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8096 			to_amdgpu_dm_connector(connector);
8097 
8098 	if (edid) {
8099 		/* empty probed_modes */
8100 		INIT_LIST_HEAD(&connector->probed_modes);
8101 		amdgpu_dm_connector->num_modes =
8102 				drm_add_edid_modes(connector, edid);
8103 
8104 		/* Sort the probed modes before calling
8105 		 * amdgpu_dm_get_native_mode(), since an EDID can have
8106 		 * more than one preferred mode. Modes that appear
8107 		 * later in the probed-mode list can have a higher
8108 		 * preferred resolution, e.g. 3840x2160 as the base
8109 		 * EDID preferred timing and 4096x2160 as the preferred
8110 		 * resolution in a DID extension block.
8111 		 */
8112 		drm_mode_sort(&connector->probed_modes);
8113 		amdgpu_dm_get_native_mode(connector);
8114 
8115 		/* Freesync capabilities are reset by calling
8116 		 * drm_add_edid_modes() and need to be
8117 		 * restored here.
8118 		 */
8119 		amdgpu_dm_update_freesync_caps(connector, edid);
8120 
8121 		amdgpu_set_panel_orientation(connector);
8122 	} else {
8123 		amdgpu_dm_connector->num_modes = 0;
8124 	}
8125 }
8126 
8127 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8128 			      struct drm_display_mode *mode)
8129 {
8130 	struct drm_display_mode *m;
8131 
8132 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8133 		if (drm_mode_equal(m, mode))
8134 			return true;
8135 	}
8136 
8137 	return false;
8138 }
8139 
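/*
 * Synthesize additional fixed-refresh modes inside the panel's FreeSync
 * range by stretching the vertical blanking of the highest-refresh probed
 * mode to hit a set of common video frame rates.
 */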
8140 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8141 {
8142 	const struct drm_display_mode *m;
8143 	struct drm_display_mode *new_mode;
8144 	uint i;
8145 	uint32_t new_modes_count = 0;
8146 
8147 	/* Standard FPS values
8148 	 *
8149 	 * 23.976       - TV/NTSC
8150 	 * 24           - Cinema
8151 	 * 25           - TV/PAL
8152 	 * 29.97        - TV/NTSC
8153 	 * 30           - TV/NTSC
8154 	 * 48           - Cinema HFR
8155 	 * 50           - TV/PAL
8156 	 * 60           - Commonly used
8157 	 * 48,72,96,120 - Multiples of 24
8158 	 */
8159 	static const uint32_t common_rates[] = {
8160 		23976, 24000, 25000, 29970, 30000,
8161 		48000, 50000, 60000, 72000, 96000, 120000
8162 	};
8163 
8164 	/*
8165 	 * Find the mode with the highest refresh rate at the same resolution
8166 	 * as the preferred mode. Some monitors report a preferred mode whose
8167 	 * resolution is lower than that of the highest-refresh-rate mode they support.
8168 	 */
8169 
8170 	m = get_highest_refresh_rate_mode(aconnector, true);
8171 	if (!m)
8172 		return 0;
8173 
8174 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8175 		uint64_t target_vtotal, target_vtotal_diff;
8176 		uint64_t num, den;
8177 
8178 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8179 			continue;
8180 
8181 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8182 		    common_rates[i] > aconnector->max_vfreq * 1000)
8183 			continue;
8184 
8185 		num = (unsigned long long)m->clock * 1000 * 1000;
8186 		den = common_rates[i] * (unsigned long long)m->htotal;
8187 		target_vtotal = div_u64(num, den);
8188 		target_vtotal_diff = target_vtotal - m->vtotal;
8189 
8190 		/* Check for illegal modes */
8191 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8192 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8193 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8194 			continue;
8195 
8196 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8197 		if (!new_mode)
8198 			goto out;
8199 
8200 		new_mode->vtotal += (u16)target_vtotal_diff;
8201 		new_mode->vsync_start += (u16)target_vtotal_diff;
8202 		new_mode->vsync_end += (u16)target_vtotal_diff;
8203 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8204 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8205 
8206 		if (!is_duplicate_mode(aconnector, new_mode)) {
8207 			drm_mode_probed_add(&aconnector->base, new_mode);
8208 			new_modes_count += 1;
8209 		} else
8210 			drm_mode_destroy(aconnector->base.dev, new_mode);
8211 	}
8212  out:
8213 	return new_modes_count;
8214 }
8215 
8216 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8217 						   struct edid *edid)
8218 {
8219 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8220 		to_amdgpu_dm_connector(connector);
8221 
8222 	if (!(amdgpu_freesync_vid_mode && edid))
8223 		return;
8224 
8225 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8226 		amdgpu_dm_connector->num_modes +=
8227 			add_fs_modes(amdgpu_dm_connector);
8228 }
8229 
8230 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8231 {
8232 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8233 			to_amdgpu_dm_connector(connector);
8234 	struct drm_encoder *encoder;
8235 	struct edid *edid = amdgpu_dm_connector->edid;
8236 
8237 	encoder = amdgpu_dm_connector_to_encoder(connector);
8238 
8239 	if (!drm_edid_is_valid(edid)) {
8240 		amdgpu_dm_connector->num_modes =
8241 				drm_add_modes_noedid(connector, 640, 480);
8242 	} else {
8243 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8244 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8245 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8246 	}
8247 	amdgpu_dm_fbc_init(connector);
8248 
8249 	return amdgpu_dm_connector->num_modes;
8250 }
8251 
8252 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8253 				     struct amdgpu_dm_connector *aconnector,
8254 				     int connector_type,
8255 				     struct dc_link *link,
8256 				     int link_index)
8257 {
8258 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8259 
8260 	/*
8261 	 * Some of the properties below require access to state, like bpc.
8262 	 * Allocate some default initial connector state with our reset helper.
8263 	 */
8264 	if (aconnector->base.funcs->reset)
8265 		aconnector->base.funcs->reset(&aconnector->base);
8266 
8267 	aconnector->connector_id = link_index;
8268 	aconnector->dc_link = link;
8269 	aconnector->base.interlace_allowed = false;
8270 	aconnector->base.doublescan_allowed = false;
8271 	aconnector->base.stereo_allowed = false;
8272 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8273 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8274 	aconnector->audio_inst = -1;
8275 	mutex_init(&aconnector->hpd_lock);
8276 
8277 	/*
8278 	 * Configure HPD hot-plug support: connector->polled defaults to 0,
8279 	 * which means HPD hot plug is not supported.
8280 	 */
8281 	switch (connector_type) {
8282 	case DRM_MODE_CONNECTOR_HDMIA:
8283 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8284 		aconnector->base.ycbcr_420_allowed =
8285 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8286 		break;
8287 	case DRM_MODE_CONNECTOR_DisplayPort:
8288 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8289 		if (link->is_dig_mapping_flexible &&
8290 		    link->dc->res_pool->funcs->link_encs_assign) {
8291 			link->link_enc =
8292 				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8293 			if (!link->link_enc)
8294 				link->link_enc =
8295 					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8296 		}
8297 
8298 		if (link->link_enc)
8299 			aconnector->base.ycbcr_420_allowed =
8300 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8301 		break;
8302 	case DRM_MODE_CONNECTOR_DVID:
8303 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8304 		break;
8305 	default:
8306 		break;
8307 	}
8308 
8309 	drm_object_attach_property(&aconnector->base.base,
8310 				dm->ddev->mode_config.scaling_mode_property,
8311 				DRM_MODE_SCALE_NONE);
8312 
8313 	drm_object_attach_property(&aconnector->base.base,
8314 				adev->mode_info.underscan_property,
8315 				UNDERSCAN_OFF);
8316 	drm_object_attach_property(&aconnector->base.base,
8317 				adev->mode_info.underscan_hborder_property,
8318 				0);
8319 	drm_object_attach_property(&aconnector->base.base,
8320 				adev->mode_info.underscan_vborder_property,
8321 				0);
8322 
8323 	if (!aconnector->mst_port)
8324 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8325 
8326 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8327 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8328 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8329 
8330 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8331 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8332 		drm_object_attach_property(&aconnector->base.base,
8333 				adev->mode_info.abm_level_property, 0);
8334 	}
8335 
8336 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8337 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8338 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8339 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8340 
8341 		if (!aconnector->mst_port)
8342 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8343 
8344 #ifdef CONFIG_DRM_AMD_DC_HDCP
8345 		if (adev->dm.hdcp_workqueue)
8346 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8347 #endif
8348 	}
8349 }
8350 
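/*
 * I2C transfer callback: translate the Linux i2c_msg array into a DC
 * i2c_command and submit it over the link's DDC channel.
 */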
8351 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8352 			      struct i2c_msg *msgs, int num)
8353 {
8354 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8355 	struct ddc_service *ddc_service = i2c->ddc_service;
8356 	struct i2c_command cmd;
8357 	int i;
8358 	int result = -EIO;
8359 
8360 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8361 
8362 	if (!cmd.payloads)
8363 		return result;
8364 
8365 	cmd.number_of_payloads = num;
8366 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8367 	cmd.speed = 100;
8368 
8369 	for (i = 0; i < num; i++) {
8370 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8371 		cmd.payloads[i].address = msgs[i].addr;
8372 		cmd.payloads[i].length = msgs[i].len;
8373 		cmd.payloads[i].data = msgs[i].buf;
8374 	}
8375 
8376 	if (dc_submit_i2c(
8377 			ddc_service->ctx->dc,
8378 			ddc_service->ddc_pin->hw_info.ddc_channel,
8379 			&cmd))
8380 		result = num;
8381 
8382 	kfree(cmd.payloads);
8383 	return result;
8384 }
8385 
8386 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8387 {
8388 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8389 }
8390 
8391 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8392 	.master_xfer = amdgpu_dm_i2c_xfer,
8393 	.functionality = amdgpu_dm_i2c_func,
8394 };
8395 
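/*
 * Allocate an i2c adapter that wraps the DC DDC service of a link. The caller
 * is responsible for registering the adapter with i2c_add_adapter() and for
 * freeing it on failure.
 */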
8396 static struct amdgpu_i2c_adapter *
8397 create_i2c(struct ddc_service *ddc_service,
8398 	   int link_index,
8399 	   int *res)
8400 {
8401 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8402 	struct amdgpu_i2c_adapter *i2c;
8403 
8404 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8405 	if (!i2c)
8406 		return NULL;
8407 	i2c->base.owner = THIS_MODULE;
8408 	i2c->base.class = I2C_CLASS_DDC;
8409 	i2c->base.dev.parent = &adev->pdev->dev;
8410 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8411 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8412 	i2c_set_adapdata(&i2c->base, i2c);
8413 	i2c->ddc_service = ddc_service;
8414 	if (i2c->ddc_service->ddc_pin)
8415 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8416 
8417 	return i2c;
8418 }
8419 
8420 
8421 /*
8422  * Note: this function assumes that dc_link_detect() was called for the
8423  * dc_link which will be represented by this aconnector.
8424  */
8425 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8426 				    struct amdgpu_dm_connector *aconnector,
8427 				    uint32_t link_index,
8428 				    struct amdgpu_encoder *aencoder)
8429 {
8430 	int res = 0;
8431 	int connector_type;
8432 	struct dc *dc = dm->dc;
8433 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8434 	struct amdgpu_i2c_adapter *i2c;
8435 
8436 	link->priv = aconnector;
8437 
8438 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8439 
8440 	i2c = create_i2c(link->ddc, link->link_index, &res);
8441 	if (!i2c) {
8442 		DRM_ERROR("Failed to create i2c adapter data\n");
8443 		return -ENOMEM;
8444 	}
8445 
8446 	aconnector->i2c = i2c;
8447 	res = i2c_add_adapter(&i2c->base);
8448 
8449 	if (res) {
8450 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8451 		goto out_free;
8452 	}
8453 
8454 	connector_type = to_drm_connector_type(link->connector_signal);
8455 
8456 	res = drm_connector_init_with_ddc(
8457 			dm->ddev,
8458 			&aconnector->base,
8459 			&amdgpu_dm_connector_funcs,
8460 			connector_type,
8461 			&i2c->base);
8462 
8463 	if (res) {
8464 		DRM_ERROR("connector_init failed\n");
8465 		aconnector->connector_id = -1;
8466 		goto out_free;
8467 	}
8468 
8469 	drm_connector_helper_add(
8470 			&aconnector->base,
8471 			&amdgpu_dm_connector_helper_funcs);
8472 
8473 	amdgpu_dm_connector_init_helper(
8474 		dm,
8475 		aconnector,
8476 		connector_type,
8477 		link,
8478 		link_index);
8479 
8480 	drm_connector_attach_encoder(
8481 		&aconnector->base, &aencoder->base);
8482 
8483 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8484 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8485 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8486 
8487 out_free:
8488 	if (res) {
8489 		kfree(i2c);
8490 		aconnector->i2c = NULL;
8491 	}
8492 	return res;
8493 }
8494 
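/* Build a bitmask with one bit set for each CRTC an encoder may be routed to. */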
8495 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8496 {
8497 	switch (adev->mode_info.num_crtc) {
8498 	case 1:
8499 		return 0x1;
8500 	case 2:
8501 		return 0x3;
8502 	case 3:
8503 		return 0x7;
8504 	case 4:
8505 		return 0xf;
8506 	case 5:
8507 		return 0x1f;
8508 	case 6:
8509 	default:
8510 		return 0x3f;
8511 	}
8512 }
8513 
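/*
 * Register the DRM encoder for a link and allow it to be routed to any
 * available CRTC.
 */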
8514 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8515 				  struct amdgpu_encoder *aencoder,
8516 				  uint32_t link_index)
8517 {
8518 	struct amdgpu_device *adev = drm_to_adev(dev);
8519 
8520 	int res = drm_encoder_init(dev,
8521 				   &aencoder->base,
8522 				   &amdgpu_dm_encoder_funcs,
8523 				   DRM_MODE_ENCODER_TMDS,
8524 				   NULL);
8525 
8526 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8527 
8528 	if (!res)
8529 		aencoder->encoder_id = link_index;
8530 	else
8531 		aencoder->encoder_id = -1;
8532 
8533 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8534 
8535 	return res;
8536 }
8537 
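/*
 * Enable or disable vblank handling and the page flip (and, with secure
 * display, vline0) interrupts for a CRTC.
 */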
8538 static void manage_dm_interrupts(struct amdgpu_device *adev,
8539 				 struct amdgpu_crtc *acrtc,
8540 				 bool enable)
8541 {
8542 	/*
8543 	 * We have no guarantee that the frontend index maps to the same
8544 	 * backend index - some even map to more than one.
8545 	 *
8546 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8547 	 */
8548 	int irq_type =
8549 		amdgpu_display_crtc_idx_to_irq_type(
8550 			adev,
8551 			acrtc->crtc_id);
8552 
8553 	if (enable) {
8554 		drm_crtc_vblank_on(&acrtc->base);
8555 		amdgpu_irq_get(
8556 			adev,
8557 			&adev->pageflip_irq,
8558 			irq_type);
8559 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8560 		amdgpu_irq_get(
8561 			adev,
8562 			&adev->vline0_irq,
8563 			irq_type);
8564 #endif
8565 	} else {
8566 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8567 		amdgpu_irq_put(
8568 			adev,
8569 			&adev->vline0_irq,
8570 			irq_type);
8571 #endif
8572 		amdgpu_irq_put(
8573 			adev,
8574 			&adev->pageflip_irq,
8575 			irq_type);
8576 		drm_crtc_vblank_off(&acrtc->base);
8577 	}
8578 }
8579 
8580 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8581 				      struct amdgpu_crtc *acrtc)
8582 {
8583 	int irq_type =
8584 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8585 
8586 	/**
8587 	 * This reads the current state for the IRQ and force reapplies
8588 	 * This reads the current state for the IRQ and forcibly reapplies
8589 	 */
8590 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8591 }
8592 
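/*
 * Return true when the connector's scaling mode or underscan settings changed
 * in a way that requires a stream update.
 */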
8593 static bool
8594 is_scaling_state_different(const struct dm_connector_state *dm_state,
8595 			   const struct dm_connector_state *old_dm_state)
8596 {
8597 	if (dm_state->scaling != old_dm_state->scaling)
8598 		return true;
8599 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8600 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8601 			return true;
8602 	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8603 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8604 			return true;
8605 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8606 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8607 		return true;
8608 	return false;
8609 }
8610 
8611 #ifdef CONFIG_DRM_AMD_DC_HDCP
8612 static bool is_content_protection_different(struct drm_connector_state *state,
8613 					    const struct drm_connector_state *old_state,
8614 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8615 {
8616 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8617 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8618 
8619 	/* Handle: Type0/1 change */
8620 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8621 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8622 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8623 		return true;
8624 	}
8625 
8626 	/* CP is being re-enabled, ignore this
8627 	 *
8628 	 * Handles:	ENABLED -> DESIRED
8629 	 */
8630 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8631 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8632 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8633 		return false;
8634 	}
8635 
8636 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8637 	 *
8638 	 * Handles:	UNDESIRED -> ENABLED
8639 	 */
8640 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8641 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8642 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8643 
8644 	/* Stream removed and re-enabled
8645 	 *
8646 	 * Can sometimes overlap with the HPD case,
8647 	 * thus set update_hdcp to false to avoid
8648 	 * setting HDCP multiple times.
8649 	 *
8650 	 * Handles:	DESIRED -> DESIRED (Special case)
8651 	 */
8652 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8653 		state->crtc && state->crtc->enabled &&
8654 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8655 		dm_con_state->update_hdcp = false;
8656 		return true;
8657 	}
8658 
8659 	/* Hot-plug, headless s3, dpms
8660 	 *
8661 	 * Only start HDCP if the display is connected/enabled.
8662 	 * update_hdcp flag will be set to false until the next
8663 	 * HPD comes in.
8664 	 *
8665 	 * Handles:	DESIRED -> DESIRED (Special case)
8666 	 */
8667 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8668 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8669 		dm_con_state->update_hdcp = false;
8670 		return true;
8671 	}
8672 
8673 	/*
8674 	 * Handles:	UNDESIRED -> UNDESIRED
8675 	 *		DESIRED -> DESIRED
8676 	 *		ENABLED -> ENABLED
8677 	 */
8678 	if (old_state->content_protection == state->content_protection)
8679 		return false;
8680 
8681 	/*
8682 	 * Handles:	UNDESIRED -> DESIRED
8683 	 *		DESIRED -> UNDESIRED
8684 	 *		ENABLED -> UNDESIRED
8685 	 */
8686 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8687 		return true;
8688 
8689 	/*
8690 	 * Handles:	DESIRED -> ENABLED
8691 	 */
8692 	return false;
8693 }
8694 
8695 #endif
8696 static void remove_stream(struct amdgpu_device *adev,
8697 			  struct amdgpu_crtc *acrtc,
8698 			  struct dc_stream_state *stream)
8699 {
8700 	/* this is the update mode case */
8701 
8702 	acrtc->otg_inst = -1;
8703 	acrtc->enabled = false;
8704 }
8705 
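/*
 * Compute the DC cursor position from the cursor plane state. Negative
 * coordinates are clamped to 0 with the clipped amount stored as the hotspot,
 * and position->enable is left false when the cursor is fully off screen.
 */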
8706 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8707 			       struct dc_cursor_position *position)
8708 {
8709 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8710 	int x, y;
8711 	int xorigin = 0, yorigin = 0;
8712 
8713 	if (!crtc || !plane->state->fb)
8714 		return 0;
8715 
8716 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8717 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8718 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8719 			  __func__,
8720 			  plane->state->crtc_w,
8721 			  plane->state->crtc_h);
8722 		return -EINVAL;
8723 	}
8724 
8725 	x = plane->state->crtc_x;
8726 	y = plane->state->crtc_y;
8727 
8728 	if (x <= -amdgpu_crtc->max_cursor_width ||
8729 	    y <= -amdgpu_crtc->max_cursor_height)
8730 		return 0;
8731 
8732 	if (x < 0) {
8733 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8734 		x = 0;
8735 	}
8736 	if (y < 0) {
8737 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8738 		y = 0;
8739 	}
8740 	position->enable = true;
8741 	position->translate_by_source = true;
8742 	position->x = x;
8743 	position->y = y;
8744 	position->x_hotspot = xorigin;
8745 	position->y_hotspot = yorigin;
8746 
8747 	return 0;
8748 }
8749 
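/*
 * Program the DC cursor attributes and position for a cursor plane update, or
 * disable the cursor when the computed position is not enabled.
 */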
8750 static void handle_cursor_update(struct drm_plane *plane,
8751 				 struct drm_plane_state *old_plane_state)
8752 {
8753 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8754 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8755 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8756 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8757 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8758 	uint64_t address = afb ? afb->address : 0;
8759 	struct dc_cursor_position position = {0};
8760 	struct dc_cursor_attributes attributes;
8761 	int ret;
8762 
8763 	if (!plane->state->fb && !old_plane_state->fb)
8764 		return;
8765 
8766 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8767 		      __func__,
8768 		      amdgpu_crtc->crtc_id,
8769 		      plane->state->crtc_w,
8770 		      plane->state->crtc_h);
8771 
8772 	ret = get_cursor_position(plane, crtc, &position);
8773 	if (ret)
8774 		return;
8775 
8776 	if (!position.enable) {
8777 		/* turn off cursor */
8778 		if (crtc_state && crtc_state->stream) {
8779 			mutex_lock(&adev->dm.dc_lock);
8780 			dc_stream_set_cursor_position(crtc_state->stream,
8781 						      &position);
8782 			mutex_unlock(&adev->dm.dc_lock);
8783 		}
8784 		return;
8785 	}
8786 
8787 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8788 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8789 
8790 	memset(&attributes, 0, sizeof(attributes));
8791 	attributes.address.high_part = upper_32_bits(address);
8792 	attributes.address.low_part  = lower_32_bits(address);
8793 	attributes.width             = plane->state->crtc_w;
8794 	attributes.height            = plane->state->crtc_h;
8795 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8796 	attributes.rotation_angle    = 0;
8797 	attributes.attribute_flags.value = 0;
8798 
8799 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8800 
8801 	if (crtc_state->stream) {
8802 		mutex_lock(&adev->dm.dc_lock);
8803 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8804 							 &attributes))
8805 			DRM_ERROR("DC failed to set cursor attributes\n");
8806 
8807 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8808 						   &position))
8809 			DRM_ERROR("DC failed to set cursor position\n");
8810 		mutex_unlock(&adev->dm.dc_lock);
8811 	}
8812 }
8813 
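/*
 * Hand the pending pageflip event over to the pageflip interrupt handler.
 * Must be called with the CRTC's event_lock held.
 */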
8814 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8815 {
8816 
8817 	assert_spin_locked(&acrtc->base.dev->event_lock);
8818 	WARN_ON(acrtc->event);
8819 
8820 	acrtc->event = acrtc->base.state->event;
8821 
8822 	/* Set the flip status */
8823 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8824 
8825 	/* Mark this event as consumed */
8826 	acrtc->base.state->event = NULL;
8827 
8828 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8829 		     acrtc->crtc_id);
8830 }
8831 
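/*
 * Recompute the VRR parameters and infopacket for a stream around a page flip
 * and note whether the FreeSync timing or infopacket changed.
 */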
8832 static void update_freesync_state_on_stream(
8833 	struct amdgpu_display_manager *dm,
8834 	struct dm_crtc_state *new_crtc_state,
8835 	struct dc_stream_state *new_stream,
8836 	struct dc_plane_state *surface,
8837 	u32 flip_timestamp_in_us)
8838 {
8839 	struct mod_vrr_params vrr_params;
8840 	struct dc_info_packet vrr_infopacket = {0};
8841 	struct amdgpu_device *adev = dm->adev;
8842 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8843 	unsigned long flags;
8844 	bool pack_sdp_v1_3 = false;
8845 
8846 	if (!new_stream)
8847 		return;
8848 
8849 	/*
8850 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8851 	 * For now it's sufficient to just guard against these conditions.
8852 	 */
8853 
8854 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8855 		return;
8856 
8857 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8858 	vrr_params = acrtc->dm_irq_params.vrr_params;
8859 
8860 	if (surface) {
8861 		mod_freesync_handle_preflip(
8862 			dm->freesync_module,
8863 			surface,
8864 			new_stream,
8865 			flip_timestamp_in_us,
8866 			&vrr_params);
8867 
8868 		if (adev->family < AMDGPU_FAMILY_AI &&
8869 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8870 			mod_freesync_handle_v_update(dm->freesync_module,
8871 						     new_stream, &vrr_params);
8872 
8873 			/* Need to call this before the frame ends. */
8874 			dc_stream_adjust_vmin_vmax(dm->dc,
8875 						   new_crtc_state->stream,
8876 						   &vrr_params.adjust);
8877 		}
8878 	}
8879 
8880 	mod_freesync_build_vrr_infopacket(
8881 		dm->freesync_module,
8882 		new_stream,
8883 		&vrr_params,
8884 		PACKET_TYPE_VRR,
8885 		TRANSFER_FUNC_UNKNOWN,
8886 		&vrr_infopacket,
8887 		pack_sdp_v1_3);
8888 
8889 	new_crtc_state->freesync_timing_changed |=
8890 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8891 			&vrr_params.adjust,
8892 			sizeof(vrr_params.adjust)) != 0);
8893 
8894 	new_crtc_state->freesync_vrr_info_changed |=
8895 		(memcmp(&new_crtc_state->vrr_infopacket,
8896 			&vrr_infopacket,
8897 			sizeof(vrr_infopacket)) != 0);
8898 
8899 	acrtc->dm_irq_params.vrr_params = vrr_params;
8900 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8901 
8902 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8903 	new_stream->vrr_infopacket = vrr_infopacket;
8904 
8905 	if (new_crtc_state->freesync_vrr_info_changed)
8906 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8907 			      new_crtc_state->base.crtc->base.id,
8908 			      (int)new_crtc_state->base.vrr_enabled,
8909 			      (int)vrr_params.state);
8910 
8911 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8912 }
8913 
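/*
 * Derive the VRR parameters from the new CRTC state and copy the FreeSync
 * configuration into the CRTC's IRQ parameters so interrupt handlers see a
 * consistent snapshot.
 */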
8914 static void update_stream_irq_parameters(
8915 	struct amdgpu_display_manager *dm,
8916 	struct dm_crtc_state *new_crtc_state)
8917 {
8918 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8919 	struct mod_vrr_params vrr_params;
8920 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8921 	struct amdgpu_device *adev = dm->adev;
8922 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8923 	unsigned long flags;
8924 
8925 	if (!new_stream)
8926 		return;
8927 
8928 	/*
8929 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8930 	 * For now it's sufficient to just guard against these conditions.
8931 	 */
8932 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8933 		return;
8934 
8935 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8936 	vrr_params = acrtc->dm_irq_params.vrr_params;
8937 
8938 	if (new_crtc_state->vrr_supported &&
8939 	    config.min_refresh_in_uhz &&
8940 	    config.max_refresh_in_uhz) {
8941 		/*
8942 		 * if freesync compatible mode was set, config.state will be set
8943 		 * in atomic check
8944 		 */
8945 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8946 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8947 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8948 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8949 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8950 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8951 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8952 		} else {
8953 			config.state = new_crtc_state->base.vrr_enabled ?
8954 						     VRR_STATE_ACTIVE_VARIABLE :
8955 						     VRR_STATE_INACTIVE;
8956 		}
8957 	} else {
8958 		config.state = VRR_STATE_UNSUPPORTED;
8959 	}
8960 
8961 	mod_freesync_build_vrr_params(dm->freesync_module,
8962 				      new_stream,
8963 				      &config, &vrr_params);
8964 
8965 	new_crtc_state->freesync_timing_changed |=
8966 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8967 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8968 
8969 	new_crtc_state->freesync_config = config;
8970 	/* Copy state for access from DM IRQ handler */
8971 	acrtc->dm_irq_params.freesync_config = config;
8972 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8973 	acrtc->dm_irq_params.vrr_params = vrr_params;
8974 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8975 }
8976 
8977 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8978 					    struct dm_crtc_state *new_state)
8979 {
8980 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8981 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8982 
8983 	if (!old_vrr_active && new_vrr_active) {
8984 		/* Transition VRR inactive -> active:
8985 		 * While VRR is active, we must not disable vblank irq, as a
8986 		 * reenable after disable would compute bogus vblank/pflip
8987 		 * timestamps if it happened inside the display front porch.
8988 		 *
8989 		 * We also need vupdate irq for the actual core vblank handling
8990 		 * at end of vblank.
8991 		 */
8992 		dm_set_vupdate_irq(new_state->base.crtc, true);
8993 		drm_crtc_vblank_get(new_state->base.crtc);
8994 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8995 				 __func__, new_state->base.crtc->base.id);
8996 	} else if (old_vrr_active && !new_vrr_active) {
8997 		/* Transition VRR active -> inactive:
8998 		 * Allow vblank irq disable again for fixed refresh rate.
8999 		 */
9000 		dm_set_vupdate_irq(new_state->base.crtc, false);
9001 		drm_crtc_vblank_put(new_state->base.crtc);
9002 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9003 				 __func__, new_state->base.crtc->base.id);
9004 	}
9005 }
9006 
9007 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9008 {
9009 	struct drm_plane *plane;
9010 	struct drm_plane_state *old_plane_state;
9011 	int i;
9012 
9013 	/*
9014 	 * TODO: Make this per-stream so we don't issue redundant updates for
9015 	 * commits with multiple streams.
9016 	 */
9017 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9018 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9019 			handle_cursor_update(plane, old_plane_state);
9020 }
9021 
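/*
 * Build a surface update bundle for all planes on a CRTC, wait on fences and
 * the target vblank as needed, then commit the plane and stream updates to DC
 * under the dc_lock.
 */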
9022 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9023 				    struct dc_state *dc_state,
9024 				    struct drm_device *dev,
9025 				    struct amdgpu_display_manager *dm,
9026 				    struct drm_crtc *pcrtc,
9027 				    bool wait_for_vblank)
9028 {
9029 	uint32_t i;
9030 	uint64_t timestamp_ns;
9031 	struct drm_plane *plane;
9032 	struct drm_plane_state *old_plane_state, *new_plane_state;
9033 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9034 	struct drm_crtc_state *new_pcrtc_state =
9035 			drm_atomic_get_new_crtc_state(state, pcrtc);
9036 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9037 	struct dm_crtc_state *dm_old_crtc_state =
9038 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9039 	int planes_count = 0, vpos, hpos;
9040 	long r;
9041 	unsigned long flags;
9042 	struct amdgpu_bo *abo;
9043 	uint32_t target_vblank, last_flip_vblank;
9044 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9045 	bool pflip_present = false;
9046 	struct {
9047 		struct dc_surface_update surface_updates[MAX_SURFACES];
9048 		struct dc_plane_info plane_infos[MAX_SURFACES];
9049 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9050 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9051 		struct dc_stream_update stream_update;
9052 	} *bundle;
9053 
9054 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9055 
9056 	if (!bundle) {
9057 		dm_error("Failed to allocate update bundle\n");
9058 		goto cleanup;
9059 	}
9060 
9061 	/*
9062 	 * Disable the cursor first if we're disabling all the planes.
9063 	 * It'll remain on the screen after the planes are re-enabled
9064 	 * if we don't.
9065 	 */
9066 	if (acrtc_state->active_planes == 0)
9067 		amdgpu_dm_commit_cursors(state);
9068 
9069 	/* update planes when needed */
9070 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9071 		struct drm_crtc *crtc = new_plane_state->crtc;
9072 		struct drm_crtc_state *new_crtc_state;
9073 		struct drm_framebuffer *fb = new_plane_state->fb;
9074 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9075 		bool plane_needs_flip;
9076 		struct dc_plane_state *dc_plane;
9077 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9078 
9079 		/* Cursor plane is handled after stream updates */
9080 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9081 			continue;
9082 
9083 		if (!fb || !crtc || pcrtc != crtc)
9084 			continue;
9085 
9086 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9087 		if (!new_crtc_state->active)
9088 			continue;
9089 
9090 		dc_plane = dm_new_plane_state->dc_state;
9091 
9092 		bundle->surface_updates[planes_count].surface = dc_plane;
9093 		if (new_pcrtc_state->color_mgmt_changed) {
9094 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9095 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9096 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9097 		}
9098 
9099 		fill_dc_scaling_info(dm->adev, new_plane_state,
9100 				     &bundle->scaling_infos[planes_count]);
9101 
9102 		bundle->surface_updates[planes_count].scaling_info =
9103 			&bundle->scaling_infos[planes_count];
9104 
9105 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9106 
9107 		pflip_present = pflip_present || plane_needs_flip;
9108 
9109 		if (!plane_needs_flip) {
9110 			planes_count += 1;
9111 			continue;
9112 		}
9113 
9114 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9115 
9116 		/*
9117 		 * Wait for all fences on this FB. Do limited wait to avoid
9118 		 * deadlock during GPU reset when this fence will not signal
9119 		 * but we hold reservation lock for the BO.
9120 		 */
9121 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9122 					  msecs_to_jiffies(5000));
9123 		if (unlikely(r <= 0))
9124 			DRM_ERROR("Waiting for fences timed out!");
9125 
9126 		fill_dc_plane_info_and_addr(
9127 			dm->adev, new_plane_state,
9128 			afb->tiling_flags,
9129 			&bundle->plane_infos[planes_count],
9130 			&bundle->flip_addrs[planes_count].address,
9131 			afb->tmz_surface, false);
9132 
9133 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9134 				 new_plane_state->plane->index,
9135 				 bundle->plane_infos[planes_count].dcc.enable);
9136 
9137 		bundle->surface_updates[planes_count].plane_info =
9138 			&bundle->plane_infos[planes_count];
9139 
9140 		/*
9141 		 * Only allow immediate flips for fast updates that don't
9142 		 * change FB pitch, DCC state, rotation or mirroring.
9143 		 */
9144 		bundle->flip_addrs[planes_count].flip_immediate =
9145 			crtc->state->async_flip &&
9146 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9147 
9148 		timestamp_ns = ktime_get_ns();
9149 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9150 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9151 		bundle->surface_updates[planes_count].surface = dc_plane;
9152 
9153 		if (!bundle->surface_updates[planes_count].surface) {
9154 			DRM_ERROR("No surface for CRTC: id=%d\n",
9155 					acrtc_attach->crtc_id);
9156 			continue;
9157 		}
9158 
9159 		if (plane == pcrtc->primary)
9160 			update_freesync_state_on_stream(
9161 				dm,
9162 				acrtc_state,
9163 				acrtc_state->stream,
9164 				dc_plane,
9165 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9166 
9167 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9168 				 __func__,
9169 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9170 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9171 
9172 		planes_count += 1;
9173 
9174 	}
9175 
9176 	if (pflip_present) {
9177 		if (!vrr_active) {
9178 			/* Use old throttling in non-vrr fixed refresh rate mode
9179 			 * to keep flip scheduling based on target vblank counts
9180 			 * working in a backwards compatible way, e.g., for
9181 			 * clients using the GLX_OML_sync_control extension or
9182 			 * DRI3/Present extension with defined target_msc.
9183 			 */
9184 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9185 		}
9186 		else {
9187 			/* For variable refresh rate mode only:
9188 			 * Get vblank of last completed flip to avoid > 1 vrr
9189 			 * flips per video frame by use of throttling, but allow
9190 			 * flip programming anywhere in the possibly large
9191 			 * variable vrr vblank interval for fine-grained flip
9192 			 * timing control and more opportunity to avoid stutter
9193 			 * on late submission of flips.
9194 			 */
9195 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9196 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9197 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9198 		}
9199 
9200 		target_vblank = last_flip_vblank + wait_for_vblank;
9201 
9202 		/*
9203 		 * Wait until we're out of the vertical blank period before the one
9204 		 * targeted by the flip
9205 		 */
9206 		while ((acrtc_attach->enabled &&
9207 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9208 							    0, &vpos, &hpos, NULL,
9209 							    NULL, &pcrtc->hwmode)
9210 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9211 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9212 			(int)(target_vblank -
9213 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9214 			usleep_range(1000, 1100);
9215 		}
9216 
9217 		/**
9218 		 * Prepare the flip event for the pageflip interrupt to handle.
9219 		 *
9220 		 * This only works in the case where we've already turned on the
9221 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9222 		 * from 0 -> n planes we have to skip a hardware generated event
9223 		 * and rely on sending it from software.
9224 		 */
9225 		if (acrtc_attach->base.state->event &&
9226 		    acrtc_state->active_planes > 0 &&
9227 		    !acrtc_state->force_dpms_off) {
9228 			drm_crtc_vblank_get(pcrtc);
9229 
9230 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9231 
9232 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9233 			prepare_flip_isr(acrtc_attach);
9234 
9235 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9236 		}
9237 
9238 		if (acrtc_state->stream) {
9239 			if (acrtc_state->freesync_vrr_info_changed)
9240 				bundle->stream_update.vrr_infopacket =
9241 					&acrtc_state->stream->vrr_infopacket;
9242 		}
9243 	}
9244 
9245 	/* Update the planes if changed or disable if we don't have any. */
9246 	if ((planes_count || acrtc_state->active_planes == 0) &&
9247 		acrtc_state->stream) {
9248 #if defined(CONFIG_DRM_AMD_DC_DCN)
9249 		/*
9250 		 * If PSR or idle optimizations are enabled then flush out
9251 		 * any pending work before hardware programming.
9252 		 */
9253 		if (dm->vblank_control_workqueue)
9254 			flush_workqueue(dm->vblank_control_workqueue);
9255 #endif
9256 
9257 		bundle->stream_update.stream = acrtc_state->stream;
9258 		if (new_pcrtc_state->mode_changed) {
9259 			bundle->stream_update.src = acrtc_state->stream->src;
9260 			bundle->stream_update.dst = acrtc_state->stream->dst;
9261 		}
9262 
9263 		if (new_pcrtc_state->color_mgmt_changed) {
9264 			/*
9265 			 * TODO: This isn't fully correct since we've actually
9266 			 * already modified the stream in place.
9267 			 */
9268 			bundle->stream_update.gamut_remap =
9269 				&acrtc_state->stream->gamut_remap_matrix;
9270 			bundle->stream_update.output_csc_transform =
9271 				&acrtc_state->stream->csc_color_matrix;
9272 			bundle->stream_update.out_transfer_func =
9273 				acrtc_state->stream->out_transfer_func;
9274 		}
9275 
9276 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9277 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9278 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9279 
9280 		/*
9281 		 * If FreeSync state on the stream has changed then we need to
9282 		 * re-adjust the min/max bounds now that DC doesn't handle this
9283 		 * as part of commit.
9284 		 */
9285 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9286 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9287 			dc_stream_adjust_vmin_vmax(
9288 				dm->dc, acrtc_state->stream,
9289 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9290 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9291 		}
9292 		mutex_lock(&dm->dc_lock);
9293 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9294 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9295 			amdgpu_dm_psr_disable(acrtc_state->stream);
9296 
9297 		dc_commit_updates_for_stream(dm->dc,
9298 						     bundle->surface_updates,
9299 						     planes_count,
9300 						     acrtc_state->stream,
9301 						     &bundle->stream_update,
9302 						     dc_state);
9303 
9304 		/**
9305 		 * Enable or disable the interrupts on the backend.
9306 		 *
9307 		 * Most pipes are put into power gating when unused.
9308 		 *
9309 		 * When power gating is enabled on a pipe we lose the
9310 		 * interrupt enablement state when power gating is disabled.
9311 		 *
9312 		 * So we need to update the IRQ control state in hardware
9313 		 * whenever the pipe turns on (since it could be previously
9314 		 * power gated) or off (since some pipes can't be power gated
9315 		 * on some ASICs).
9316 		 */
9317 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9318 			dm_update_pflip_irq_state(drm_to_adev(dev),
9319 						  acrtc_attach);
9320 
9321 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9322 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9323 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9324 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9325 
9326 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9327 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9328 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9329 			struct amdgpu_dm_connector *aconn =
9330 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9331 
9332 			if (aconn->psr_skip_count > 0)
9333 				aconn->psr_skip_count--;
9334 
9335 			/* Allow PSR when skip count is 0. */
9336 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9337 		} else {
9338 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9339 		}
9340 
9341 		mutex_unlock(&dm->dc_lock);
9342 	}
9343 
9344 	/*
9345 	 * Update cursor state *after* programming all the planes.
9346 	 * This avoids redundant programming in the case where we're going
9347 	 * to be disabling a single plane - those pipes are being disabled.
9348 	 */
9349 	if (acrtc_state->active_planes)
9350 		amdgpu_dm_commit_cursors(state);
9351 
9352 cleanup:
9353 	kfree(bundle);
9354 }
9355 
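/*
 * Notify the audio component about ELD changes: first signal removals for
 * connectors whose CRTC changed or needs a modeset, then signal additions for
 * connectors with newly active streams.
 */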
9356 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9357 				   struct drm_atomic_state *state)
9358 {
9359 	struct amdgpu_device *adev = drm_to_adev(dev);
9360 	struct amdgpu_dm_connector *aconnector;
9361 	struct drm_connector *connector;
9362 	struct drm_connector_state *old_con_state, *new_con_state;
9363 	struct drm_crtc_state *new_crtc_state;
9364 	struct dm_crtc_state *new_dm_crtc_state;
9365 	const struct dc_stream_status *status;
9366 	int i, inst;
9367 
9368 	/* Notify device removals. */
9369 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9370 		if (old_con_state->crtc != new_con_state->crtc) {
9371 			/* CRTC changes require notification. */
9372 			goto notify;
9373 		}
9374 
9375 		if (!new_con_state->crtc)
9376 			continue;
9377 
9378 		new_crtc_state = drm_atomic_get_new_crtc_state(
9379 			state, new_con_state->crtc);
9380 
9381 		if (!new_crtc_state)
9382 			continue;
9383 
9384 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9385 			continue;
9386 
9387 	notify:
9388 		aconnector = to_amdgpu_dm_connector(connector);
9389 
9390 		mutex_lock(&adev->dm.audio_lock);
9391 		inst = aconnector->audio_inst;
9392 		aconnector->audio_inst = -1;
9393 		mutex_unlock(&adev->dm.audio_lock);
9394 
9395 		amdgpu_dm_audio_eld_notify(adev, inst);
9396 	}
9397 
9398 	/* Notify audio device additions. */
9399 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9400 		if (!new_con_state->crtc)
9401 			continue;
9402 
9403 		new_crtc_state = drm_atomic_get_new_crtc_state(
9404 			state, new_con_state->crtc);
9405 
9406 		if (!new_crtc_state)
9407 			continue;
9408 
9409 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9410 			continue;
9411 
9412 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9413 		if (!new_dm_crtc_state->stream)
9414 			continue;
9415 
9416 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9417 		if (!status)
9418 			continue;
9419 
9420 		aconnector = to_amdgpu_dm_connector(connector);
9421 
9422 		mutex_lock(&adev->dm.audio_lock);
9423 		inst = status->audio_inst;
9424 		aconnector->audio_inst = inst;
9425 		mutex_unlock(&adev->dm.audio_lock);
9426 
9427 		amdgpu_dm_audio_eld_notify(adev, inst);
9428 	}
9429 }
9430 
9431 /*
9432  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9433  * @crtc_state: the DRM CRTC state
9434  * @stream_state: the DC stream state.
9435  *
9436  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9437  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9438  */
9439 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9440 						struct dc_stream_state *stream_state)
9441 {
9442 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9443 }
9444 
9445 /**
9446  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9447  * @state: The atomic state to commit
9448  *
9449  * This will tell DC to commit the constructed DC state from atomic_check,
9450  * programming the hardware. Any failure here implies a hardware failure, since
9451  * atomic check should have filtered anything non-kosher.
9452  */
9453 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9454 {
9455 	struct drm_device *dev = state->dev;
9456 	struct amdgpu_device *adev = drm_to_adev(dev);
9457 	struct amdgpu_display_manager *dm = &adev->dm;
9458 	struct dm_atomic_state *dm_state;
9459 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9460 	uint32_t i, j;
9461 	struct drm_crtc *crtc;
9462 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9463 	unsigned long flags;
9464 	bool wait_for_vblank = true;
9465 	struct drm_connector *connector;
9466 	struct drm_connector_state *old_con_state, *new_con_state;
9467 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9468 	int crtc_disable_count = 0;
9469 	bool mode_set_reset_required = false;
9470 
9471 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9472 
9473 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9474 
9475 	dm_state = dm_atomic_get_new_state(state);
9476 	if (dm_state && dm_state->context) {
9477 		dc_state = dm_state->context;
9478 	} else {
9479 		/* No state changes, retain current state. */
9480 		dc_state_temp = dc_create_state(dm->dc);
9481 		ASSERT(dc_state_temp);
9482 		dc_state = dc_state_temp;
9483 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9484 	}
9485 
9486 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9487 				       new_crtc_state, i) {
9488 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9489 
9490 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9491 
9492 		if (old_crtc_state->active &&
9493 		    (!new_crtc_state->active ||
9494 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9495 			manage_dm_interrupts(adev, acrtc, false);
9496 			dc_stream_release(dm_old_crtc_state->stream);
9497 		}
9498 	}
9499 
9500 	drm_atomic_helper_calc_timestamping_constants(state);
9501 
9502 	/* update changed items */
9503 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9504 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9505 
9506 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9507 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9508 
9509 		DRM_DEBUG_ATOMIC(
9510 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9511 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9512 			"connectors_changed:%d\n",
9513 			acrtc->crtc_id,
9514 			new_crtc_state->enable,
9515 			new_crtc_state->active,
9516 			new_crtc_state->planes_changed,
9517 			new_crtc_state->mode_changed,
9518 			new_crtc_state->active_changed,
9519 			new_crtc_state->connectors_changed);
9520 
9521 		/* Disable cursor if disabling crtc */
9522 		if (old_crtc_state->active && !new_crtc_state->active) {
9523 			struct dc_cursor_position position;
9524 
9525 			memset(&position, 0, sizeof(position));
9526 			mutex_lock(&dm->dc_lock);
9527 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9528 			mutex_unlock(&dm->dc_lock);
9529 		}
9530 
9531 		/* Copy all transient state flags into dc state */
9532 		if (dm_new_crtc_state->stream) {
9533 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9534 							    dm_new_crtc_state->stream);
9535 		}
9536 
9537 		/* handles headless hotplug case, updating new_state and
9538 		 * aconnector as needed
9539 		 */
9540 
9541 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9542 
9543 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9544 
9545 			if (!dm_new_crtc_state->stream) {
9546 				/*
9547 				 * This could happen because of issues with
9548 				 * userspace notification delivery.
9549 				 * In this case userspace tries to set a mode on
9550 				 * a display which is in fact disconnected.
9551 				 * dc_sink is NULL in this case on aconnector.
9552 				 * We expect a mode reset to come soon.
9553 				 *
9554 				 * This can also happen when an unplug is done
9555 				 * during the resume sequence.
9556 				 *
9557 				 * In this case, we want to pretend we still
9558 				 * have a sink to keep the pipe running so that
9559 				 * hw state is consistent with the sw state.
9560 				 */
9561 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9562 						__func__, acrtc->base.base.id);
9563 				continue;
9564 			}
9565 
9566 			if (dm_old_crtc_state->stream)
9567 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9568 
9569 			pm_runtime_get_noresume(dev->dev);
9570 
9571 			acrtc->enabled = true;
9572 			acrtc->hw_mode = new_crtc_state->mode;
9573 			crtc->hwmode = new_crtc_state->mode;
9574 			mode_set_reset_required = true;
9575 		} else if (modereset_required(new_crtc_state)) {
9576 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9577 			/* i.e. reset mode */
9578 			if (dm_old_crtc_state->stream)
9579 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9580 
9581 			mode_set_reset_required = true;
9582 		}
9583 	} /* for_each_crtc_in_state() */
9584 
9585 	if (dc_state) {
9586 		/* if there is a mode set or reset, disable eDP PSR */
9587 		if (mode_set_reset_required) {
9588 #if defined(CONFIG_DRM_AMD_DC_DCN)
9589 			if (dm->vblank_control_workqueue)
9590 				flush_workqueue(dm->vblank_control_workqueue);
9591 #endif
9592 			amdgpu_dm_psr_disable_all(dm);
9593 		}
9594 
9595 		dm_enable_per_frame_crtc_master_sync(dc_state);
9596 		mutex_lock(&dm->dc_lock);
9597 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9598 #if defined(CONFIG_DRM_AMD_DC_DCN)
9599 		/* Allow idle optimization when vblank count is 0 for display off */
9600 		if (dm->active_vblank_irq_count == 0)
9601 			dc_allow_idle_optimizations(dm->dc, true);
9602 #endif
9603 		mutex_unlock(&dm->dc_lock);
9604 	}
9605 
9606 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9607 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9608 
9609 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9610 
9611 		if (dm_new_crtc_state->stream != NULL) {
9612 			const struct dc_stream_status *status =
9613 					dc_stream_get_status(dm_new_crtc_state->stream);
9614 
9615 			if (!status)
9616 				status = dc_stream_get_status_from_state(dc_state,
9617 									 dm_new_crtc_state->stream);
9618 			if (!status)
9619 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9620 			else
9621 				acrtc->otg_inst = status->primary_otg_inst;
9622 		}
9623 	}
9624 #ifdef CONFIG_DRM_AMD_DC_HDCP
9625 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9626 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9627 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9628 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9629 
9630 		new_crtc_state = NULL;
9631 
9632 		if (acrtc)
9633 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9634 
9635 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9636 
9637 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9638 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9639 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9640 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9641 			dm_new_con_state->update_hdcp = true;
9642 			continue;
9643 		}
9644 
9645 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9646 			hdcp_update_display(
9647 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9648 				new_con_state->hdcp_content_type,
9649 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9650 	}
9651 #endif
9652 
9653 	/* Handle connector state changes */
9654 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9655 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9656 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9657 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9658 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9659 		struct dc_stream_update stream_update;
9660 		struct dc_info_packet hdr_packet;
9661 		struct dc_stream_status *status = NULL;
9662 		bool abm_changed, hdr_changed, scaling_changed;
9663 
9664 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9665 		memset(&stream_update, 0, sizeof(stream_update));
9666 
9667 		if (acrtc) {
9668 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9669 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9670 		}
9671 
9672 		/* Skip any modesets/resets */
9673 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9674 			continue;
9675 
9676 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9677 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9678 
9679 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9680 							     dm_old_con_state);
9681 
9682 		abm_changed = dm_new_crtc_state->abm_level !=
9683 			      dm_old_crtc_state->abm_level;
9684 
9685 		hdr_changed =
9686 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9687 
9688 		if (!scaling_changed && !abm_changed && !hdr_changed)
9689 			continue;
9690 
9691 		stream_update.stream = dm_new_crtc_state->stream;
9692 		if (scaling_changed) {
9693 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9694 					dm_new_con_state, dm_new_crtc_state->stream);
9695 
9696 			stream_update.src = dm_new_crtc_state->stream->src;
9697 			stream_update.dst = dm_new_crtc_state->stream->dst;
9698 		}
9699 
9700 		if (abm_changed) {
9701 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9702 
9703 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9704 		}
9705 
9706 		if (hdr_changed) {
9707 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9708 			stream_update.hdr_static_metadata = &hdr_packet;
9709 		}
9710 
9711 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9712 
9713 		if (WARN_ON(!status))
9714 			continue;
9715 
9716 		WARN_ON(!status->plane_count);
9717 
9718 		/*
9719 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9720 		 * Here we create an empty update on each plane.
9721 		 * To fix this, DC should permit updating only stream properties.
9722 		 */
9723 		for (j = 0; j < status->plane_count; j++)
9724 			dummy_updates[j].surface = status->plane_states[0];
9725 
9726 
9727 		mutex_lock(&dm->dc_lock);
9728 		dc_commit_updates_for_stream(dm->dc,
9729 						     dummy_updates,
9730 						     status->plane_count,
9731 						     dm_new_crtc_state->stream,
9732 						     &stream_update,
9733 						     dc_state);
9734 		mutex_unlock(&dm->dc_lock);
9735 	}
9736 
9737 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9738 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9739 				      new_crtc_state, i) {
9740 		if (old_crtc_state->active && !new_crtc_state->active)
9741 			crtc_disable_count++;
9742 
9743 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9744 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9745 
9746 		/* For freesync config update on crtc state and params for irq */
9747 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9748 
9749 		/* Handle vrr on->off / off->on transitions */
9750 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9751 						dm_new_crtc_state);
9752 	}
9753 
9754 	/**
9755 	 * Enable interrupts for CRTCs that are newly enabled or went through
9756 	 * a modeset. This is intentionally deferred until after the front end
9757 	 * state has been modified so that the OTG is on before the IRQ
9758 	 * handlers run, preventing them from accessing stale or invalid state.
9759 	 */
9760 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9761 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9762 #ifdef CONFIG_DEBUG_FS
9763 		bool configure_crc = false;
9764 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9765 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9766 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9767 #endif
9768 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9769 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9770 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9771 #endif
9772 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9773 
9774 		if (new_crtc_state->active &&
9775 		    (!old_crtc_state->active ||
9776 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9777 			dc_stream_retain(dm_new_crtc_state->stream);
9778 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9779 			manage_dm_interrupts(adev, acrtc, true);
9780 
9781 #ifdef CONFIG_DEBUG_FS
9782 			/**
9783 			 * Frontend may have changed so reapply the CRC capture
9784 			 * settings for the stream.
9785 			 */
9786 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9787 
9788 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9789 				configure_crc = true;
9790 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9791 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9792 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9793 					acrtc->dm_irq_params.crc_window.update_win = true;
9794 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9795 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9796 					crc_rd_wrk->crtc = crtc;
9797 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9798 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9799 				}
9800 #endif
9801 			}
9802 
9803 			if (configure_crc)
9804 				if (amdgpu_dm_crtc_configure_crc_source(
9805 					crtc, dm_new_crtc_state, cur_crc_src))
9806 					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9807 #endif
9808 		}
9809 	}
9810 
9811 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9812 		if (new_crtc_state->async_flip)
9813 			wait_for_vblank = false;
9814 
9815 	/* update planes when needed per crtc */
9816 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9817 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9818 
9819 		if (dm_new_crtc_state->stream)
9820 			amdgpu_dm_commit_planes(state, dc_state, dev,
9821 						dm, crtc, wait_for_vblank);
9822 	}
9823 
9824 	/* Update audio instances for each connector. */
9825 	amdgpu_dm_commit_audio(dev, state);
9826 
9827 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9828 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9829 	/* restore the backlight level */
9830 	for (i = 0; i < dm->num_of_edps; i++) {
9831 		if (dm->backlight_dev[i] &&
9832 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9833 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9834 	}
9835 #endif
9836 	/*
9837 	 * send vblank event on all events not handled in flip and
9838 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9839 	 */
9840 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9841 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9842 
9843 		if (new_crtc_state->event)
9844 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9845 
9846 		new_crtc_state->event = NULL;
9847 	}
9848 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9849 
9850 	/* Signal HW programming completion */
9851 	drm_atomic_helper_commit_hw_done(state);
9852 
9853 	if (wait_for_vblank)
9854 		drm_atomic_helper_wait_for_flip_done(dev, state);
9855 
9856 	drm_atomic_helper_cleanup_planes(dev, state);
9857 
9858 	/* return the stolen vga memory back to VRAM */
9859 	if (!adev->mman.keep_stolen_vga_memory)
9860 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9861 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9862 
9863 	/*
9864 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9865 	 * so we can put the GPU into runtime suspend if we're not driving any
9866 	 * displays anymore
9867 	 */
9868 	for (i = 0; i < crtc_disable_count; i++)
9869 		pm_runtime_put_autosuspend(dev->dev);
9870 	pm_runtime_mark_last_busy(dev->dev);
9871 
9872 	if (dc_state_temp)
9873 		dc_release_state(dc_state_temp);
9874 }
9875 
9876 
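/*
 * Build and commit a minimal atomic state containing the connector, its CRTC
 * and its primary plane, with mode_changed forced, to restore the previous
 * display configuration.
 */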
9877 static int dm_force_atomic_commit(struct drm_connector *connector)
9878 {
9879 	int ret = 0;
9880 	struct drm_device *ddev = connector->dev;
9881 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9882 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9883 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9884 	struct drm_connector_state *conn_state;
9885 	struct drm_crtc_state *crtc_state;
9886 	struct drm_plane_state *plane_state;
9887 
9888 	if (!state)
9889 		return -ENOMEM;
9890 
9891 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9892 
9893 	/* Construct an atomic state to restore previous display setting */
9894 
9895 	/*
9896 	 * Attach connectors to drm_atomic_state
9897 	 */
9898 	conn_state = drm_atomic_get_connector_state(state, connector);
9899 
9900 	ret = PTR_ERR_OR_ZERO(conn_state);
9901 	if (ret)
9902 		goto out;
9903 
9904 	/* Attach crtc to drm_atomic_state */
9905 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9906 
9907 	ret = PTR_ERR_OR_ZERO(crtc_state);
9908 	if (ret)
9909 		goto out;
9910 
9911 	/* force a restore */
9912 	crtc_state->mode_changed = true;
9913 
9914 	/* Attach plane to drm_atomic_state */
9915 	plane_state = drm_atomic_get_plane_state(state, plane);
9916 
9917 	ret = PTR_ERR_OR_ZERO(plane_state);
9918 	if (ret)
9919 		goto out;
9920 
9921 	/* Call commit internally with the state we just constructed */
9922 	ret = drm_atomic_commit(state);
9923 
9924 out:
9925 	drm_atomic_state_put(state);
9926 	if (ret)
9927 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9928 
9929 	return ret;
9930 }
9931 
9932 /*
9933  * This function handles all cases where a set mode does not come upon hotplug.
9934  * This includes when a display is unplugged then plugged back into the
9935  * same port, and when running without usermode desktop manager support.
9936  */
9937 void dm_restore_drm_connector_state(struct drm_device *dev,
9938 				    struct drm_connector *connector)
9939 {
9940 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9941 	struct amdgpu_crtc *disconnected_acrtc;
9942 	struct dm_crtc_state *acrtc_state;
9943 
9944 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9945 		return;
9946 
9947 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9948 	if (!disconnected_acrtc)
9949 		return;
9950 
9951 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9952 	if (!acrtc_state->stream)
9953 		return;
9954 
9955 	/*
9956 	 * If the previous sink is not released and different from the current,
9957 	 * we deduce we are in a state where we cannot rely on a usermode call
9958 	 * to turn on the display, so we do it here.
9959 	 */
9960 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9961 		dm_force_atomic_commit(&aconnector->base);
9962 }
9963 
9964 /*
9965  * Grabs all modesetting locks to serialize against any blocking commits,
9966  * and waits for completion of all non-blocking commits.
9967  */
9968 static int do_aquire_global_lock(struct drm_device *dev,
9969 				 struct drm_atomic_state *state)
9970 {
9971 	struct drm_crtc *crtc;
9972 	struct drm_crtc_commit *commit;
9973 	long ret;
9974 
9975 	/*
9976 	 * Adding all modeset locks to acquire_ctx will
9977 	 * ensure that when the framework releases it, the
9978 	 * extra locks we are locking here will get released too
9979 	 */
9980 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9981 	if (ret)
9982 		return ret;
9983 
9984 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9985 		spin_lock(&crtc->commit_lock);
9986 		commit = list_first_entry_or_null(&crtc->commit_list,
9987 				struct drm_crtc_commit, commit_entry);
9988 		if (commit)
9989 			drm_crtc_commit_get(commit);
9990 		spin_unlock(&crtc->commit_lock);
9991 
9992 		if (!commit)
9993 			continue;
9994 
9995 		/*
9996 		 * Make sure all pending HW programming has completed and
9997 		 * page flips are done
9998 		 */
9999 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10000 
10001 		if (ret > 0)
10002 			ret = wait_for_completion_interruptible_timeout(
10003 					&commit->flip_done, 10*HZ);
10004 
10005 		if (ret == 0)
10006 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10007 				  "timed out\n", crtc->base.id, crtc->name);
10008 
10009 		drm_crtc_commit_put(commit);
10010 	}
10011 
10012 	return ret < 0 ? ret : 0;
10013 }
10014 
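/*
 * Derive the FreeSync (VRR) configuration for the CRTC from the connector's
 * capabilities: VRR is supported when the connector reports FreeSync and the
 * current vrefresh lies within [min_vfreq, max_vfreq]. The refresh limits are
 * converted from Hz to uHz (x 1,000,000) for the freesync module.
 */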
10015 static void get_freesync_config_for_crtc(
10016 	struct dm_crtc_state *new_crtc_state,
10017 	struct dm_connector_state *new_con_state)
10018 {
10019 	struct mod_freesync_config config = {0};
10020 	struct amdgpu_dm_connector *aconnector =
10021 			to_amdgpu_dm_connector(new_con_state->base.connector);
10022 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10023 	int vrefresh = drm_mode_vrefresh(mode);
10024 	bool fs_vid_mode = false;
10025 
10026 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10027 					vrefresh >= aconnector->min_vfreq &&
10028 					vrefresh <= aconnector->max_vfreq;
10029 
10030 	if (new_crtc_state->vrr_supported) {
10031 		new_crtc_state->stream->ignore_msa_timing_param = true;
10032 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10033 
10034 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10035 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10036 		config.vsif_supported = true;
10037 		config.btr = true;
10038 
10039 		if (fs_vid_mode) {
10040 			config.state = VRR_STATE_ACTIVE_FIXED;
10041 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10042 			goto out;
10043 		} else if (new_crtc_state->base.vrr_enabled) {
10044 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10045 		} else {
10046 			config.state = VRR_STATE_INACTIVE;
10047 		}
10048 	}
10049 out:
10050 	new_crtc_state->freesync_config = config;
10051 }
10052 
10053 static void reset_freesync_config_for_crtc(
10054 	struct dm_crtc_state *new_crtc_state)
10055 {
10056 	new_crtc_state->vrr_supported = false;
10057 
10058 	memset(&new_crtc_state->vrr_infopacket, 0,
10059 	       sizeof(new_crtc_state->vrr_infopacket));
10060 }
10061 
10062 static bool
10063 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10064 				 struct drm_crtc_state *new_crtc_state)
10065 {
10066 	struct drm_display_mode old_mode, new_mode;
10067 
10068 	if (!old_crtc_state || !new_crtc_state)
10069 		return false;
10070 
10071 	old_mode = old_crtc_state->mode;
10072 	new_mode = new_crtc_state->mode;
10073 
10074 	if (old_mode.clock       == new_mode.clock &&
10075 	    old_mode.hdisplay    == new_mode.hdisplay &&
10076 	    old_mode.vdisplay    == new_mode.vdisplay &&
10077 	    old_mode.htotal      == new_mode.htotal &&
10078 	    old_mode.vtotal      != new_mode.vtotal &&
10079 	    old_mode.hsync_start == new_mode.hsync_start &&
10080 	    old_mode.vsync_start != new_mode.vsync_start &&
10081 	    old_mode.hsync_end   == new_mode.hsync_end &&
10082 	    old_mode.vsync_end   != new_mode.vsync_end &&
10083 	    old_mode.hskew       == new_mode.hskew &&
10084 	    old_mode.vscan       == new_mode.vscan &&
10085 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10086 	    (new_mode.vsync_end - new_mode.vsync_start))
10087 		return true;
10088 
10089 	return false;
10090 }
10091 
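/*
 * Compute the fixed refresh rate in uHz from the current mode:
 * refresh_uhz = (pixel clock in Hz * 1,000,000) / (htotal * vtotal).
 * mode.clock is in kHz, hence the additional factor of 1000.
 */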
10092 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10093 	uint64_t num, den, res;
10094 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10095 
10096 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10097 
10098 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10099 	den = (unsigned long long)new_crtc_state->mode.htotal *
10100 	      (unsigned long long)new_crtc_state->mode.vtotal;
10101 
10102 	res = div_u64(num, den);
10103 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10104 }
10105 
10106 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10107 				struct drm_atomic_state *state,
10108 				struct drm_crtc *crtc,
10109 				struct drm_crtc_state *old_crtc_state,
10110 				struct drm_crtc_state *new_crtc_state,
10111 				bool enable,
10112 				bool *lock_and_validation_needed)
10113 {
10114 	struct dm_atomic_state *dm_state = NULL;
10115 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10116 	struct dc_stream_state *new_stream;
10117 	int ret = 0;
10118 
10119 	/*
10120 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10121 	 * update changed items
10122 	 */
10123 	struct amdgpu_crtc *acrtc = NULL;
10124 	struct amdgpu_dm_connector *aconnector = NULL;
10125 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10126 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10127 
10128 	new_stream = NULL;
10129 
10130 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10131 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10132 	acrtc = to_amdgpu_crtc(crtc);
10133 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10134 
10135 	/* TODO This hack should go away */
10136 	if (aconnector && enable) {
10137 		/* Make sure fake sink is created in plug-in scenario */
10138 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10139 							    &aconnector->base);
10140 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10141 							    &aconnector->base);
10142 
10143 		if (IS_ERR(drm_new_conn_state)) {
10144 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10145 			goto fail;
10146 		}
10147 
10148 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10149 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10150 
10151 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10152 			goto skip_modeset;
10153 
10154 		new_stream = create_validate_stream_for_sink(aconnector,
10155 							     &new_crtc_state->mode,
10156 							     dm_new_conn_state,
10157 							     dm_old_crtc_state->stream);
10158 
10159 		/*
10160 		 * We can have no stream on ACTION_SET if a display
10161 		 * was disconnected during S3; in this case it is not an
10162 		 * error, the OS will be updated after detection and
10163 		 * will do the right thing on the next atomic commit
10164 		 */
10165 
10166 		if (!new_stream) {
10167 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10168 					__func__, acrtc->base.base.id);
10169 			ret = -ENOMEM;
10170 			goto fail;
10171 		}
10172 
10173 		/*
10174 		 * TODO: Check VSDB bits to decide whether this should
10175 		 * be enabled or not.
10176 		 */
10177 		new_stream->triggered_crtc_reset.enabled =
10178 			dm->force_timing_sync;
10179 
10180 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10181 
10182 		ret = fill_hdr_info_packet(drm_new_conn_state,
10183 					   &new_stream->hdr_static_metadata);
10184 		if (ret)
10185 			goto fail;
10186 
10187 		/*
10188 		 * If we already removed the old stream from the context
10189 		 * (and set the new stream to NULL) then we can't reuse
10190 		 * the old stream even if the stream and scaling are unchanged.
10191 		 * We'll hit the BUG_ON and black screen.
10192 		 *
10193 		 * TODO: Refactor this function to allow this check to work
10194 		 * in all conditions.
10195 		 */
10196 		if (amdgpu_freesync_vid_mode &&
10197 		    dm_new_crtc_state->stream &&
10198 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10199 			goto skip_modeset;
10200 
10201 		if (dm_new_crtc_state->stream &&
10202 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10203 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10204 			new_crtc_state->mode_changed = false;
10205 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10206 					 new_crtc_state->mode_changed);
10207 		}
10208 	}
10209 
10210 	/* mode_changed flag may get updated above, need to check again */
10211 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10212 		goto skip_modeset;
10213 
10214 	DRM_DEBUG_ATOMIC(
10215 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10216 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
10217 		"connectors_changed:%d\n",
10218 		acrtc->crtc_id,
10219 		new_crtc_state->enable,
10220 		new_crtc_state->active,
10221 		new_crtc_state->planes_changed,
10222 		new_crtc_state->mode_changed,
10223 		new_crtc_state->active_changed,
10224 		new_crtc_state->connectors_changed);
10225 
10226 	/* Remove stream for any changed/disabled CRTC */
10227 	if (!enable) {
10228 
10229 		if (!dm_old_crtc_state->stream)
10230 			goto skip_modeset;
10231 
10232 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10233 		    is_timing_unchanged_for_freesync(new_crtc_state,
10234 						     old_crtc_state)) {
10235 			new_crtc_state->mode_changed = false;
10236 			DRM_DEBUG_DRIVER(
10237 				"Mode change not required for front porch change, "
10238 				"setting mode_changed to %d",
10239 				new_crtc_state->mode_changed);
10240 
10241 			set_freesync_fixed_config(dm_new_crtc_state);
10242 
10243 			goto skip_modeset;
10244 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10245 			   is_freesync_video_mode(&new_crtc_state->mode,
10246 						  aconnector)) {
10247 			struct drm_display_mode *high_mode;
10248 
10249 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10250 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10251 				set_freesync_fixed_config(dm_new_crtc_state);
10252 			}
10253 		}
10254 
10255 		ret = dm_atomic_get_state(state, &dm_state);
10256 		if (ret)
10257 			goto fail;
10258 
10259 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10260 				crtc->base.id);
10261 
10262 		/* i.e. reset mode */
10263 		if (dc_remove_stream_from_ctx(
10264 				dm->dc,
10265 				dm_state->context,
10266 				dm_old_crtc_state->stream) != DC_OK) {
10267 			ret = -EINVAL;
10268 			goto fail;
10269 		}
10270 
10271 		dc_stream_release(dm_old_crtc_state->stream);
10272 		dm_new_crtc_state->stream = NULL;
10273 
10274 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10275 
10276 		*lock_and_validation_needed = true;
10277 
10278 	} else {/* Add stream for any updated/enabled CRTC */
10279 		/*
10280 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
10281 		 * added MST connectors are not found in the existing crtc_state in chained mode
10282 		 * TODO: need to dig out the root cause of that
10283 		 */
10284 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10285 			goto skip_modeset;
10286 
10287 		if (modereset_required(new_crtc_state))
10288 			goto skip_modeset;
10289 
10290 		if (modeset_required(new_crtc_state, new_stream,
10291 				     dm_old_crtc_state->stream)) {
10292 
10293 			WARN_ON(dm_new_crtc_state->stream);
10294 
10295 			ret = dm_atomic_get_state(state, &dm_state);
10296 			if (ret)
10297 				goto fail;
10298 
10299 			dm_new_crtc_state->stream = new_stream;
10300 
10301 			dc_stream_retain(new_stream);
10302 
10303 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10304 					 crtc->base.id);
10305 
10306 			if (dc_add_stream_to_ctx(
10307 					dm->dc,
10308 					dm_state->context,
10309 					dm_new_crtc_state->stream) != DC_OK) {
10310 				ret = -EINVAL;
10311 				goto fail;
10312 			}
10313 
10314 			*lock_and_validation_needed = true;
10315 		}
10316 	}
10317 
10318 skip_modeset:
10319 	/* Release extra reference */
10320 	if (new_stream)
10321 		 dc_stream_release(new_stream);
10322 
10323 	/*
10324 	 * We want to do dc stream updates that do not require a
10325 	 * full modeset below.
10326 	 */
10327 	if (!(enable && aconnector && new_crtc_state->active))
10328 		return 0;
10329 	/*
10330 	 * Given above conditions, the dc state cannot be NULL because:
10331 	 * 1. We're in the process of enabling CRTCs (just been added
10332 	 *    to the dc context, or is already in the context)
10333 	 * 2. Has a valid connector attached, and
10334 	 * 3. Is currently active and enabled.
10335 	 * => The dc stream state currently exists.
10336 	 */
10337 	BUG_ON(dm_new_crtc_state->stream == NULL);
10338 
10339 	/* Scaling or underscan settings */
10340 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10341 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10342 		update_stream_scaling_settings(
10343 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10344 
10345 	/* ABM settings */
10346 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10347 
10348 	/*
10349 	 * Color management settings. We also update color properties
10350 	 * when a modeset is needed, to ensure it gets reprogrammed.
10351 	 */
10352 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10353 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10354 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10355 		if (ret)
10356 			goto fail;
10357 	}
10358 
10359 	/* Update Freesync settings. */
10360 	get_freesync_config_for_crtc(dm_new_crtc_state,
10361 				     dm_new_conn_state);
10362 
10363 	return ret;
10364 
10365 fail:
10366 	if (new_stream)
10367 		dc_stream_release(new_stream);
10368 	return ret;
10369 }
10370 
10371 static bool should_reset_plane(struct drm_atomic_state *state,
10372 			       struct drm_plane *plane,
10373 			       struct drm_plane_state *old_plane_state,
10374 			       struct drm_plane_state *new_plane_state)
10375 {
10376 	struct drm_plane *other;
10377 	struct drm_plane_state *old_other_state, *new_other_state;
10378 	struct drm_crtc_state *new_crtc_state;
10379 	int i;
10380 
10381 	/*
10382 	 * TODO: Remove this hack once the checks below are sufficient
10383 	 * to determine when we need to reset all the planes on
10384 	 * the stream.
10385 	 */
10386 	if (state->allow_modeset)
10387 		return true;
10388 
10389 	/* Exit early if we know that we're adding or removing the plane. */
10390 	if (old_plane_state->crtc != new_plane_state->crtc)
10391 		return true;
10392 
10393 	/* old crtc == new_crtc == NULL, plane not in context. */
10394 	if (!new_plane_state->crtc)
10395 		return false;
10396 
10397 	new_crtc_state =
10398 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10399 
10400 	if (!new_crtc_state)
10401 		return true;
10402 
10403 	/* CRTC Degamma changes currently require us to recreate planes. */
10404 	if (new_crtc_state->color_mgmt_changed)
10405 		return true;
10406 
10407 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10408 		return true;
10409 
10410 	/*
10411 	 * If there are any new primary or overlay planes being added or
10412 	 * removed then the z-order can potentially change. To ensure
10413 	 * correct z-order and pipe acquisition the current DC architecture
10414 	 * requires us to remove and recreate all existing planes.
10415 	 *
10416 	 * TODO: Come up with a more elegant solution for this.
10417 	 */
10418 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10419 		struct amdgpu_framebuffer *old_afb, *new_afb;
10420 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10421 			continue;
10422 
10423 		if (old_other_state->crtc != new_plane_state->crtc &&
10424 		    new_other_state->crtc != new_plane_state->crtc)
10425 			continue;
10426 
10427 		if (old_other_state->crtc != new_other_state->crtc)
10428 			return true;
10429 
10430 		/* Src/dst size and scaling updates. */
10431 		if (old_other_state->src_w != new_other_state->src_w ||
10432 		    old_other_state->src_h != new_other_state->src_h ||
10433 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10434 		    old_other_state->crtc_h != new_other_state->crtc_h)
10435 			return true;
10436 
10437 		/* Rotation / mirroring updates. */
10438 		if (old_other_state->rotation != new_other_state->rotation)
10439 			return true;
10440 
10441 		/* Blending updates. */
10442 		if (old_other_state->pixel_blend_mode !=
10443 		    new_other_state->pixel_blend_mode)
10444 			return true;
10445 
10446 		/* Alpha updates. */
10447 		if (old_other_state->alpha != new_other_state->alpha)
10448 			return true;
10449 
10450 		/* Colorspace changes. */
10451 		if (old_other_state->color_range != new_other_state->color_range ||
10452 		    old_other_state->color_encoding != new_other_state->color_encoding)
10453 			return true;
10454 
10455 		/* Framebuffer checks fall at the end. */
10456 		if (!old_other_state->fb || !new_other_state->fb)
10457 			continue;
10458 
10459 		/* Pixel format changes can require bandwidth updates. */
10460 		if (old_other_state->fb->format != new_other_state->fb->format)
10461 			return true;
10462 
10463 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10464 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10465 
10466 		/* Tiling and DCC changes also require bandwidth updates. */
10467 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10468 		    old_afb->base.modifier != new_afb->base.modifier)
10469 			return true;
10470 	}
10471 
10472 	return false;
10473 }
10474 
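/*
 * Validate a framebuffer bound to the cursor plane: it must fit within the
 * CRTC's maximum cursor dimensions, must not be cropped, must have a pitch of
 * 64, 128 or 256 pixels that matches its width, and (when no modifier is
 * supplied) must use a linear tiling layout.
 */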
10475 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10476 			      struct drm_plane_state *new_plane_state,
10477 			      struct drm_framebuffer *fb)
10478 {
10479 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10480 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10481 	unsigned int pitch;
10482 	bool linear;
10483 
10484 	if (fb->width > new_acrtc->max_cursor_width ||
10485 	    fb->height > new_acrtc->max_cursor_height) {
10486 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10487 				 new_plane_state->fb->width,
10488 				 new_plane_state->fb->height);
10489 		return -EINVAL;
10490 	}
10491 	if (new_plane_state->src_w != fb->width << 16 ||
10492 	    new_plane_state->src_h != fb->height << 16) {
10493 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10494 		return -EINVAL;
10495 	}
10496 
10497 	/* Pitch in pixels */
10498 	pitch = fb->pitches[0] / fb->format->cpp[0];
10499 
10500 	if (fb->width != pitch) {
10501 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10502 				 fb->width, pitch);
10503 		return -EINVAL;
10504 	}
10505 
10506 	switch (pitch) {
10507 	case 64:
10508 	case 128:
10509 	case 256:
10510 		/* FB pitch is supported by cursor plane */
10511 		break;
10512 	default:
10513 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10514 		return -EINVAL;
10515 	}
10516 
10517 	/* Core DRM takes care of checking FB modifiers, so we only need to
10518 	 * check tiling flags when the FB doesn't have a modifier. */
10519 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10520 		if (adev->family < AMDGPU_FAMILY_AI) {
10521 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10522 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10523 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10524 		} else {
10525 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10526 		}
10527 		if (!linear) {
10528 			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10529 			return -EINVAL;
10530 		}
10531 	}
10532 
10533 	return 0;
10534 }
10535 
10536 static int dm_update_plane_state(struct dc *dc,
10537 				 struct drm_atomic_state *state,
10538 				 struct drm_plane *plane,
10539 				 struct drm_plane_state *old_plane_state,
10540 				 struct drm_plane_state *new_plane_state,
10541 				 bool enable,
10542 				 bool *lock_and_validation_needed)
10543 {
10544 
10545 	struct dm_atomic_state *dm_state = NULL;
10546 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10547 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10548 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10549 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10550 	struct amdgpu_crtc *new_acrtc;
10551 	bool needs_reset;
10552 	int ret = 0;
10553 
10554 
10555 	new_plane_crtc = new_plane_state->crtc;
10556 	old_plane_crtc = old_plane_state->crtc;
10557 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10558 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10559 
10560 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10561 		if (!enable || !new_plane_crtc ||
10562 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10563 			return 0;
10564 
10565 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10566 
10567 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10568 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10569 			return -EINVAL;
10570 		}
10571 
10572 		if (new_plane_state->fb) {
10573 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10574 						 new_plane_state->fb);
10575 			if (ret)
10576 				return ret;
10577 		}
10578 
10579 		return 0;
10580 	}
10581 
10582 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10583 					 new_plane_state);
10584 
10585 	/* Remove any changed/removed planes */
10586 	if (!enable) {
10587 		if (!needs_reset)
10588 			return 0;
10589 
10590 		if (!old_plane_crtc)
10591 			return 0;
10592 
10593 		old_crtc_state = drm_atomic_get_old_crtc_state(
10594 				state, old_plane_crtc);
10595 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10596 
10597 		if (!dm_old_crtc_state->stream)
10598 			return 0;
10599 
10600 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10601 				plane->base.id, old_plane_crtc->base.id);
10602 
10603 		ret = dm_atomic_get_state(state, &dm_state);
10604 		if (ret)
10605 			return ret;
10606 
10607 		if (!dc_remove_plane_from_context(
10608 				dc,
10609 				dm_old_crtc_state->stream,
10610 				dm_old_plane_state->dc_state,
10611 				dm_state->context)) {
10612 
10613 			return -EINVAL;
10614 		}
10615 
10616 
10617 		dc_plane_state_release(dm_old_plane_state->dc_state);
10618 		dm_new_plane_state->dc_state = NULL;
10619 
10620 		*lock_and_validation_needed = true;
10621 
10622 	} else { /* Add new planes */
10623 		struct dc_plane_state *dc_new_plane_state;
10624 
10625 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10626 			return 0;
10627 
10628 		if (!new_plane_crtc)
10629 			return 0;
10630 
10631 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10632 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10633 
10634 		if (!dm_new_crtc_state->stream)
10635 			return 0;
10636 
10637 		if (!needs_reset)
10638 			return 0;
10639 
10640 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10641 		if (ret)
10642 			return ret;
10643 
10644 		WARN_ON(dm_new_plane_state->dc_state);
10645 
10646 		dc_new_plane_state = dc_create_plane_state(dc);
10647 		if (!dc_new_plane_state)
10648 			return -ENOMEM;
10649 
10650 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10651 				 plane->base.id, new_plane_crtc->base.id);
10652 
10653 		ret = fill_dc_plane_attributes(
10654 			drm_to_adev(new_plane_crtc->dev),
10655 			dc_new_plane_state,
10656 			new_plane_state,
10657 			new_crtc_state);
10658 		if (ret) {
10659 			dc_plane_state_release(dc_new_plane_state);
10660 			return ret;
10661 		}
10662 
10663 		ret = dm_atomic_get_state(state, &dm_state);
10664 		if (ret) {
10665 			dc_plane_state_release(dc_new_plane_state);
10666 			return ret;
10667 		}
10668 
10669 		/*
10670 		 * Any atomic check errors that occur after this will
10671 		 * not need a release. The plane state will be attached
10672 		 * to the stream, and therefore part of the atomic
10673 		 * state. It'll be released when the atomic state is
10674 		 * cleaned.
10675 		 */
10676 		if (!dc_add_plane_to_context(
10677 				dc,
10678 				dm_new_crtc_state->stream,
10679 				dc_new_plane_state,
10680 				dm_state->context)) {
10681 
10682 			dc_plane_state_release(dc_new_plane_state);
10683 			return -EINVAL;
10684 		}
10685 
10686 		dm_new_plane_state->dc_state = dc_new_plane_state;
10687 
10688 		/* Tell DC to do a full surface update every time there
10689 		 * is a plane change. Inefficient, but works for now.
10690 		 */
10691 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10692 
10693 		*lock_and_validation_needed = true;
10694 	}
10695 
10696 
10697 	return ret;
10698 }
10699 
10700 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10701 				struct drm_crtc *crtc,
10702 				struct drm_crtc_state *new_crtc_state)
10703 {
10704 	struct drm_plane *cursor = crtc->cursor, *underlying;
10705 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10706 	int i;
10707 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10708 
10709 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10710 	 * cursor per pipe but it's going to inherit the scaling and
10711 	 * positioning from the underlying pipe. Check that the cursor plane's
10712 	 * scaling matches the underlying planes'. */
10713 
10714 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10715 	if (!new_cursor_state || !new_cursor_state->fb)
10716 		return 0;
10718 
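	/*
	 * Scale factors are expressed in units of 1/1000: e.g. a 64x64 cursor
	 * surface shown in a 64x64 CRTC rectangle yields a scale of 1000.
	 * src_w/src_h are 16.16 fixed point, hence the >> 16.
	 */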
10719 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10720 			 (new_cursor_state->src_w >> 16);
10721 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10722 			 (new_cursor_state->src_h >> 16);
10723 
10724 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10725 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10726 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10727 			continue;
10728 
10729 		/* Ignore disabled planes */
10730 		if (!new_underlying_state->fb)
10731 			continue;
10732 
10733 		underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10734 				     (new_underlying_state->src_w >> 16);
10735 		underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10736 				     (new_underlying_state->src_h >> 16);
10737 
10738 		if (cursor_scale_w != underlying_scale_w ||
10739 		    cursor_scale_h != underlying_scale_h) {
10740 			drm_dbg_atomic(crtc->dev,
10741 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10742 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10743 			return -EINVAL;
10744 		}
10745 
10746 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10747 		if (new_underlying_state->crtc_x <= 0 &&
10748 		    new_underlying_state->crtc_y <= 0 &&
10749 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10750 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10751 			break;
10752 	}
10753 
10754 	return 0;
10755 }
10756 
10757 #if defined(CONFIG_DRM_AMD_DC_DCN)
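/*
 * If the CRTC is driven by an MST connector, add every CRTC that shares the
 * same MST topology manager to the atomic state so that DSC bandwidth can be
 * recomputed across the whole topology.
 */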
10758 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10759 {
10760 	struct drm_connector *connector;
10761 	struct drm_connector_state *conn_state;
10762 	struct amdgpu_dm_connector *aconnector = NULL;
10763 	int i;
10764 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10765 		if (conn_state->crtc != crtc)
10766 			continue;
10767 
10768 		aconnector = to_amdgpu_dm_connector(connector);
10769 		if (!aconnector->port || !aconnector->mst_port)
10770 			aconnector = NULL;
10771 		else
10772 			break;
10773 	}
10774 
10775 	if (!aconnector)
10776 		return 0;
10777 
10778 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10779 }
10780 #endif
10781 
10782 /**
10783  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10784  * @dev: The DRM device
10785  * @state: The atomic state to commit
10786  *
10787  * Validate that the given atomic state is programmable by DC into hardware.
10788  * This involves constructing a &struct dc_state reflecting the new hardware
10789  * state we wish to commit, then querying DC to see if it is programmable. It's
10790  * important not to modify the existing DC state. Otherwise, atomic_check
10791  * may unexpectedly commit hardware changes.
10792  *
10793  * When validating the DC state, it's important that the right locks are
10794  * acquired. For the full update case, which removes/adds/updates streams on one
10795  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10796  * that any such full update commit will wait for completion of any outstanding
10797  * flip using DRM's synchronization events.
10798  *
10799  * Note that DM adds the affected connectors for all CRTCs in state, when that
10800  * might not seem necessary. This is because DC stream creation requires the
10801  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10802  * be possible but non-trivial - a possible TODO item.
10803  *
10804  * Return: 0 on success, or a negative error code if validation failed.
10805  */
10806 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10807 				  struct drm_atomic_state *state)
10808 {
10809 	struct amdgpu_device *adev = drm_to_adev(dev);
10810 	struct dm_atomic_state *dm_state = NULL;
10811 	struct dc *dc = adev->dm.dc;
10812 	struct drm_connector *connector;
10813 	struct drm_connector_state *old_con_state, *new_con_state;
10814 	struct drm_crtc *crtc;
10815 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10816 	struct drm_plane *plane;
10817 	struct drm_plane_state *old_plane_state, *new_plane_state;
10818 	enum dc_status status;
10819 	int ret, i;
10820 	bool lock_and_validation_needed = false;
10821 	struct dm_crtc_state *dm_old_crtc_state;
10822 #if defined(CONFIG_DRM_AMD_DC_DCN)
10823 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10824 	struct drm_dp_mst_topology_state *mst_state;
10825 	struct drm_dp_mst_topology_mgr *mgr;
10826 #endif
10827 
10828 	trace_amdgpu_dm_atomic_check_begin(state);
10829 
10830 	ret = drm_atomic_helper_check_modeset(dev, state);
10831 	if (ret) {
10832 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10833 		goto fail;
10834 	}
10835 
10836 	/* Check connector changes */
10837 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10838 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10839 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10840 
10841 		/* Skip connectors that are disabled or part of modeset already. */
10842 		if (!old_con_state->crtc && !new_con_state->crtc)
10843 			continue;
10844 
10845 		if (!new_con_state->crtc)
10846 			continue;
10847 
10848 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10849 		if (IS_ERR(new_crtc_state)) {
10850 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10851 			ret = PTR_ERR(new_crtc_state);
10852 			goto fail;
10853 		}
10854 
10855 		if (dm_old_con_state->abm_level !=
10856 		    dm_new_con_state->abm_level)
10857 			new_crtc_state->connectors_changed = true;
10858 	}
10859 
10860 #if defined(CONFIG_DRM_AMD_DC_DCN)
10861 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10862 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10863 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10864 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10865 				if (ret) {
10866 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10867 					goto fail;
10868 				}
10869 			}
10870 		}
10871 	}
10872 #endif
10873 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10874 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10875 
10876 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10877 		    !new_crtc_state->color_mgmt_changed &&
10878 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10879 			dm_old_crtc_state->dsc_force_changed == false)
10880 			continue;
10881 
10882 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10883 		if (ret) {
10884 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10885 			goto fail;
10886 		}
10887 
10888 		if (!new_crtc_state->enable)
10889 			continue;
10890 
10891 		ret = drm_atomic_add_affected_connectors(state, crtc);
10892 		if (ret) {
10893 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10894 			goto fail;
10895 		}
10896 
10897 		ret = drm_atomic_add_affected_planes(state, crtc);
10898 		if (ret) {
10899 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10900 			goto fail;
10901 		}
10902 
10903 		if (dm_old_crtc_state->dsc_force_changed)
10904 			new_crtc_state->mode_changed = true;
10905 	}
10906 
10907 	/*
10908 	 * Add all primary and overlay planes on the CRTC to the state
10909 	 * whenever a plane is enabled to maintain correct z-ordering
10910 	 * and to enable fast surface updates.
10911 	 */
10912 	drm_for_each_crtc(crtc, dev) {
10913 		bool modified = false;
10914 
10915 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10916 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10917 				continue;
10918 
10919 			if (new_plane_state->crtc == crtc ||
10920 			    old_plane_state->crtc == crtc) {
10921 				modified = true;
10922 				break;
10923 			}
10924 		}
10925 
10926 		if (!modified)
10927 			continue;
10928 
10929 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10930 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10931 				continue;
10932 
10933 			new_plane_state =
10934 				drm_atomic_get_plane_state(state, plane);
10935 
10936 			if (IS_ERR(new_plane_state)) {
10937 				ret = PTR_ERR(new_plane_state);
10938 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
10939 				goto fail;
10940 			}
10941 		}
10942 	}
10943 
10944 	/* Remove existing planes if they are modified */
10945 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10946 		ret = dm_update_plane_state(dc, state, plane,
10947 					    old_plane_state,
10948 					    new_plane_state,
10949 					    false,
10950 					    &lock_and_validation_needed);
10951 		if (ret) {
10952 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10953 			goto fail;
10954 		}
10955 	}
10956 
10957 	/* Disable all crtcs which require disable */
10958 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10959 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10960 					   old_crtc_state,
10961 					   new_crtc_state,
10962 					   false,
10963 					   &lock_and_validation_needed);
10964 		if (ret) {
10965 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10966 			goto fail;
10967 		}
10968 	}
10969 
10970 	/* Enable all crtcs which require enable */
10971 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10972 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10973 					   old_crtc_state,
10974 					   new_crtc_state,
10975 					   true,
10976 					   &lock_and_validation_needed);
10977 		if (ret) {
10978 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
10979 			goto fail;
10980 		}
10981 	}
10982 
10983 	/* Add new/modified planes */
10984 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10985 		ret = dm_update_plane_state(dc, state, plane,
10986 					    old_plane_state,
10987 					    new_plane_state,
10988 					    true,
10989 					    &lock_and_validation_needed);
10990 		if (ret) {
10991 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10992 			goto fail;
10993 		}
10994 	}
10995 
10996 	/* Run this here since we want to validate the streams we created */
10997 	ret = drm_atomic_helper_check_planes(dev, state);
10998 	if (ret) {
10999 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11000 		goto fail;
11001 	}
11002 
11003 	/* Check cursor planes scaling */
11004 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11005 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11006 		if (ret) {
11007 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11008 			goto fail;
11009 		}
11010 	}
11011 
11012 	if (state->legacy_cursor_update) {
11013 		/*
11014 		 * This is a fast cursor update coming from the plane update
11015 		 * helper, check if it can be done asynchronously for better
11016 		 * performance.
11017 		 */
11018 		state->async_update =
11019 			!drm_atomic_helper_async_check(dev, state);
11020 
11021 		/*
11022 		 * Skip the remaining global validation if this is an async
11023 		 * update. Cursor updates can be done without affecting
11024 		 * state or bandwidth calcs and this avoids the performance
11025 		 * penalty of locking the private state object and
11026 		 * allocating a new dc_state.
11027 		 */
11028 		if (state->async_update)
11029 			return 0;
11030 	}
11031 
11032 	/* Check scaling and underscan changes */
11033 	/* TODO Removed scaling changes validation due to inability to commit
11034 	 * new stream into context w/o causing full reset. Need to
11035 	 * decide how to handle.
11036 	 */
11037 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11038 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11039 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11040 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11041 
11042 		/* Skip any modesets/resets */
11043 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11044 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11045 			continue;
11046 
11047 		/* Skip anything that is not a scaling or underscan change */
11048 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11049 			continue;
11050 
11051 		lock_and_validation_needed = true;
11052 	}
11053 
11054 #if defined(CONFIG_DRM_AMD_DC_DCN)
11055 	/* set the slot info for each mst_state based on the link encoding format */
11056 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11057 		struct amdgpu_dm_connector *aconnector;
11058 		struct drm_connector *connector;
11059 		struct drm_connector_list_iter iter;
11060 		u8 link_coding_cap;
11061 
11062 		if (!mgr->mst_state)
11063 			continue;
11064 
11065 		drm_connector_list_iter_begin(dev, &iter);
11066 		drm_for_each_connector_iter(connector, &iter) {
11067 			int id = connector->index;
11068 
11069 			if (id == mst_state->mgr->conn_base_id) {
11070 				aconnector = to_amdgpu_dm_connector(connector);
11071 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11072 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11073 
11074 				break;
11075 			}
11076 		}
11077 		drm_connector_list_iter_end(&iter);
11078 
11079 	}
11080 #endif
11081 	/*
11082 	 * Streams and planes are reset when there are changes that affect
11083 	 * bandwidth. Anything that affects bandwidth needs to go through
11084 	 * DC global validation to ensure that the configuration can be applied
11085 	 * to hardware.
11086 	 *
11087 	 * We have to currently stall out here in atomic_check for outstanding
11088 	 * commits to finish in this case because our IRQ handlers reference
11089 	 * DRM state directly - we can end up disabling interrupts too early
11090 	 * if we don't.
11091 	 *
11092 	 * TODO: Remove this stall and drop DM state private objects.
11093 	 */
11094 	if (lock_and_validation_needed) {
11095 		ret = dm_atomic_get_state(state, &dm_state);
11096 		if (ret) {
11097 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11098 			goto fail;
11099 		}
11100 
11101 		ret = do_aquire_global_lock(dev, state);
11102 		if (ret) {
11103 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11104 			goto fail;
11105 		}
11106 
11107 #if defined(CONFIG_DRM_AMD_DC_DCN)
11108 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11109 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
11110 			goto fail;
11111 		}
11112 
11113 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11114 		if (ret) {
11115 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11116 			goto fail;
11117 		}
11118 #endif
11119 
11120 		/*
11121 		 * Perform validation of MST topology in the state:
11122 		 * We need to perform MST atomic check before calling
11123 		 * dc_validate_global_state(), or there is a chance
11124 		 * to get stuck in an infinite loop and hang eventually.
11125 		 */
11126 		ret = drm_dp_mst_atomic_check(state);
11127 		if (ret) {
11128 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11129 			goto fail;
11130 		}
11131 		status = dc_validate_global_state(dc, dm_state->context, false);
11132 		if (status != DC_OK) {
11133 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11134 				       dc_status_to_str(status), status);
11135 			ret = -EINVAL;
11136 			goto fail;
11137 		}
11138 	} else {
11139 		/*
11140 		 * The commit is a fast update. Fast updates shouldn't change
11141 		 * the DC context or affect global validation, and they can have their
11142 		 * commit work done in parallel with other commits not touching
11143 		 * the same resource. If we have a new DC context as part of
11144 		 * the DM atomic state from validation we need to free it and
11145 		 * retain the existing one instead.
11146 		 *
11147 		 * Furthermore, since the DM atomic state only contains the DC
11148 		 * context and can safely be annulled, we can free the state
11149 		 * and clear the associated private object now to free
11150 		 * some memory and avoid a possible use-after-free later.
11151 		 */
11152 
11153 		for (i = 0; i < state->num_private_objs; i++) {
11154 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11155 
11156 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11157 				int j = state->num_private_objs-1;
11158 
11159 				dm_atomic_destroy_state(obj,
11160 						state->private_objs[i].state);
11161 
11162 				/* If i is not at the end of the array then the
11163 				 * last element needs to be moved to where i was
11164 				 * before the array can safely be truncated.
11165 				 */
11166 				if (i != j)
11167 					state->private_objs[i] =
11168 						state->private_objs[j];
11169 
11170 				state->private_objs[j].ptr = NULL;
11171 				state->private_objs[j].state = NULL;
11172 				state->private_objs[j].old_state = NULL;
11173 				state->private_objs[j].new_state = NULL;
11174 
11175 				state->num_private_objs = j;
11176 				break;
11177 			}
11178 		}
11179 	}
11180 
11181 	/* Store the overall update type for use later in atomic check. */
11182 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11183 		struct dm_crtc_state *dm_new_crtc_state =
11184 			to_dm_crtc_state(new_crtc_state);
11185 
11186 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11187 							 UPDATE_TYPE_FULL :
11188 							 UPDATE_TYPE_FAST;
11189 	}
11190 
11191 	/* Must be success */
11192 	WARN_ON(ret);
11193 
11194 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11195 
11196 	return ret;
11197 
11198 fail:
11199 	if (ret == -EDEADLK)
11200 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11201 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11202 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11203 	else
11204 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11205 
11206 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11207 
11208 	return ret;
11209 }
11210 
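/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether the
 * DP_MSA_TIMING_PAR_IGNORED bit is set, i.e. whether the sink can ignore the
 * MSA timing parameters, which DP FreeSync detection relies on.
 */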
11211 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11212 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11213 {
11214 	uint8_t dpcd_data;
11215 	bool capable = false;
11216 
11217 	if (amdgpu_dm_connector->dc_link &&
11218 		dm_helpers_dp_read_dpcd(
11219 				NULL,
11220 				amdgpu_dm_connector->dc_link,
11221 				DP_DOWN_STREAM_PORT_COUNT,
11222 				&dpcd_data,
11223 				sizeof(dpcd_data))) {
11224 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11225 	}
11226 
11227 	return capable;
11228 }
11229 
11230 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11231 		unsigned int offset,
11232 		unsigned int total_length,
11233 		uint8_t *data,
11234 		unsigned int length,
11235 		struct amdgpu_hdmi_vsdb_info *vsdb)
11236 {
11237 	bool res;
11238 	union dmub_rb_cmd cmd;
11239 	struct dmub_cmd_send_edid_cea *input;
11240 	struct dmub_cmd_edid_cea_output *output;
11241 
11242 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11243 		return false;
11244 
11245 	memset(&cmd, 0, sizeof(cmd));
11246 
11247 	input = &cmd.edid_cea.data.input;
11248 
11249 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11250 	cmd.edid_cea.header.sub_type = 0;
11251 	cmd.edid_cea.header.payload_bytes =
11252 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11253 	input->offset = offset;
11254 	input->length = length;
11255 	input->total_length = total_length;
11256 	memcpy(input->payload, data, length);
11257 
11258 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11259 	if (!res) {
11260 		DRM_ERROR("EDID CEA parser failed\n");
11261 		return false;
11262 	}
11263 
11264 	output = &cmd.edid_cea.data.output;
11265 
11266 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11267 		if (!output->ack.success) {
11268 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11269 					output->ack.offset);
11270 		}
11271 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11272 		if (!output->amd_vsdb.vsdb_found)
11273 			return false;
11274 
11275 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11276 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11277 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11278 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11279 	} else {
11280 		DRM_WARN("Unknown EDID CEA parser results\n");
11281 		return false;
11282 	}
11283 
11284 	return true;
11285 }
11286 
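/*
 * Stream the CEA extension block to the DMCU firmware in 8-byte chunks; after
 * the last chunk the firmware either reports an AMD VSDB (FreeSync range) or
 * the block is treated as not containing one. Intermediate chunks only need
 * an ack.
 */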
11287 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11288 		uint8_t *edid_ext, int len,
11289 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11290 {
11291 	int i;
11292 
11293 	/* send extension block to DMCU for parsing */
11294 	for (i = 0; i < len; i += 8) {
11295 		bool res;
11296 		int offset;
11297 
11298 		/* send 8 bytes at a time */
11299 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11300 			return false;
11301 
11302 		if (i + 8 == len) {
11303 			/* EDID block sent completely, expect result */
11304 			int version, min_rate, max_rate;
11305 
11306 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11307 			if (res) {
11308 				/* amd vsdb found */
11309 				vsdb_info->freesync_supported = 1;
11310 				vsdb_info->amd_vsdb_version = version;
11311 				vsdb_info->min_refresh_rate_hz = min_rate;
11312 				vsdb_info->max_refresh_rate_hz = max_rate;
11313 				return true;
11314 			}
11315 			/* not amd vsdb */
11316 			return false;
11317 		}
11318 
11319 		/* check for ack */
11320 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11321 		if (!res)
11322 			return false;
11323 	}
11324 
11325 	return false;
11326 }
11327 
11328 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11329 		uint8_t *edid_ext, int len,
11330 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11331 {
11332 	int i;
11333 
11334 	/* send extension block to DMUB for parsing */
11335 	for (i = 0; i < len; i += 8) {
11336 		/* send 8 bytes at a time */
11337 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11338 			return false;
11339 	}
11340 
11341 	return vsdb_info->freesync_supported;
11342 }
11343 
11344 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11345 		uint8_t *edid_ext, int len,
11346 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11347 {
11348 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11349 
11350 	if (adev->dm.dmub_srv)
11351 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11352 	else
11353 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11354 }
11355 
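/*
 * Locate the CEA extension block in the EDID and hand it to the firmware
 * based parser to look for the AMD vendor-specific data block. Returns the
 * extension index on success or -ENODEV if no usable block was found.
 */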
11356 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11357 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11358 {
11359 	uint8_t *edid_ext = NULL;
11360 	int i;
11361 	bool valid_vsdb_found = false;
11362 
11363 	/*----- drm_find_cea_extension() -----*/
11364 	/* No EDID or EDID extensions */
11365 	if (edid == NULL || edid->extensions == 0)
11366 		return -ENODEV;
11367 
11368 	/* Find CEA extension */
11369 	for (i = 0; i < edid->extensions; i++) {
11370 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11371 		if (edid_ext[0] == CEA_EXT)
11372 			break;
11373 	}
11374 
11375 	if (i == edid->extensions)
11376 		return -ENODEV;
11377 
11378 	/*----- cea_db_offsets() -----*/
11379 	if (edid_ext[0] != CEA_EXT)
11380 		return -ENODEV;
11381 
11382 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11383 
11384 	return valid_vsdb_found ? i : -ENODEV;
11385 }
11386 
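/*
 * Update FreeSync capability for the connector based on the EDID: DP/eDP
 * sinks use the monitor range descriptor (only when the sink can ignore MSA
 * timing), HDMI sinks use the AMD VSDB in the CEA extension. A range of more
 * than 10 Hz between min and max vfreq is required, and the result is
 * reflected in the DRM vrr_capable property.
 */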
11387 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11388 					struct edid *edid)
11389 {
11390 	int i = 0;
11391 	struct detailed_timing *timing;
11392 	struct detailed_non_pixel *data;
11393 	struct detailed_data_monitor_range *range;
11394 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11395 			to_amdgpu_dm_connector(connector);
11396 	struct dm_connector_state *dm_con_state = NULL;
11397 	struct dc_sink *sink;
11398 
11399 	struct drm_device *dev = connector->dev;
11400 	struct amdgpu_device *adev = drm_to_adev(dev);
11401 	bool freesync_capable = false;
11402 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11403 
11404 	if (!connector->state) {
11405 		DRM_ERROR("%s - Connector has no state\n", __func__);
11406 		goto update;
11407 	}
11408 
11409 	sink = amdgpu_dm_connector->dc_sink ?
11410 		amdgpu_dm_connector->dc_sink :
11411 		amdgpu_dm_connector->dc_em_sink;
11412 
11413 	if (!edid || !sink) {
11414 		dm_con_state = to_dm_connector_state(connector->state);
11415 
11416 		amdgpu_dm_connector->min_vfreq = 0;
11417 		amdgpu_dm_connector->max_vfreq = 0;
11418 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11419 		connector->display_info.monitor_range.min_vfreq = 0;
11420 		connector->display_info.monitor_range.max_vfreq = 0;
11421 		freesync_capable = false;
11422 
11423 		goto update;
11424 	}
11425 
11426 	dm_con_state = to_dm_connector_state(connector->state);
11427 
11428 	if (!adev->dm.freesync_module)
11429 		goto update;
11430 
11431 
11432 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11433 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11434 		bool edid_check_required = false;
11435 
11436 		if (edid) {
11437 			edid_check_required = is_dp_capable_without_timing_msa(
11438 						adev->dm.dc,
11439 						amdgpu_dm_connector);
11440 		}
11441 
11442 		if (edid_check_required == true && (edid->version > 1 ||
11443 		   (edid->version == 1 && edid->revision > 1))) {
11444 			for (i = 0; i < 4; i++) {
11445 
11446 				timing	= &edid->detailed_timings[i];
11447 				data	= &timing->data.other_data;
11448 				range	= &data->data.range;
11449 				/*
11450 				 * Check if monitor has continuous frequency mode
11451 				 */
11452 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11453 					continue;
11454 				/*
11455 				 * Check for flag range limits only. If flag == 1 then
11456 				 * no additional timing information provided.
11457 				 * Default GTF, GTF Secondary curve and CVT are not
11458 				 * supported
11459 				 */
11460 				if (range->flags != 1)
11461 					continue;
11462 
11463 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11464 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11465 				amdgpu_dm_connector->pixel_clock_mhz =
11466 					range->pixel_clock_mhz * 10;
11467 
11468 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11469 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11470 
11471 				break;
11472 			}
11473 
11474 			if (amdgpu_dm_connector->max_vfreq -
11475 			    amdgpu_dm_connector->min_vfreq > 10) {
11476 
11477 				freesync_capable = true;
11478 			}
11479 		}
11480 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11481 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11482 		if (i >= 0 && vsdb_info.freesync_supported) {
11483 			timing  = &edid->detailed_timings[i];
11484 			data    = &timing->data.other_data;
11485 
11486 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11487 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11488 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11489 				freesync_capable = true;
11490 
11491 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11492 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11493 		}
11494 	}
11495 
11496 update:
11497 	if (dm_con_state)
11498 		dm_con_state->freesync_capable = freesync_capable;
11499 
11500 	if (connector->vrr_capable_property)
11501 		drm_connector_set_vrr_capable_property(connector,
11502 						       freesync_capable);
11503 }
11504 
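/*
 * Propagate the force_timing_sync setting to every stream in the current DC
 * state and retrigger CRTC synchronization, all under dc_lock.
 */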
11505 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11506 {
11507 	struct amdgpu_device *adev = drm_to_adev(dev);
11508 	struct dc *dc = adev->dm.dc;
11509 	int i;
11510 
11511 	mutex_lock(&adev->dm.dc_lock);
11512 	if (dc->current_state) {
11513 		for (i = 0; i < dc->current_state->stream_count; ++i)
11514 			dc->current_state->streams[i]
11515 				->triggered_crtc_reset.enabled =
11516 				adev->dm.force_timing_sync;
11517 
11518 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11519 		dc_trigger_sync(dc, dc->current_state);
11520 	}
11521 	mutex_unlock(&adev->dm.dc_lock);
11522 }
11523 
11524 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11525 		       uint32_t value, const char *func_name)
11526 {
11527 #ifdef DM_CHECK_ADDR_0
11528 	if (address == 0) {
11529 		DC_ERR("invalid register write. address = 0\n");
11530 		return;
11531 	}
11532 #endif
11533 	cgs_write_register(ctx->cgs_device, address, value);
11534 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11535 }
11536 
11537 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11538 			  const char *func_name)
11539 {
11540 	uint32_t value;
11541 #ifdef DM_CHECK_ADDR_0
11542 	if (address == 0) {
11543 		DC_ERR("invalid register read; address = 0\n");
11544 		return 0;
11545 	}
11546 #endif
11547 
11548 	if (ctx->dmub_srv &&
11549 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11550 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11551 		ASSERT(false);
11552 		return 0;
11553 	}
11554 
11555 	value = cgs_read_register(ctx->cgs_device, address);
11556 
11557 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11558 
11559 	return value;
11560 }
11561 
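/*
 * Translate the outcome of a DMUB async-to-sync request into a return value:
 * for AUX transfers a successful access returns the reply length, otherwise
 * -1 with *operation_result describing the failure; for SET_CONFIG a
 * successful access returns 0 and the sink's status.
 */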
11562 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11563 	uint8_t status_type, uint32_t *operation_result)
11564 {
11565 	struct amdgpu_device *adev = ctx->driver_context;
11566 	int return_status = -1;
11567 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11568 
11569 	if (is_cmd_aux) {
11570 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11571 			return_status = p_notify->aux_reply.length;
11572 			*operation_result = p_notify->result;
11573 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11574 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11575 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11576 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11577 		} else {
11578 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11579 		}
11580 	} else {
11581 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11582 			return_status = 0;
11583 			*operation_result = p_notify->sc_status;
11584 		} else {
11585 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11586 		}
11587 	}
11588 
11589 	return return_status;
11590 }
11591 
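/*
 * Issue an AUX or SET_CONFIG request through DMUB and wait (up to 10 s) on
 * dmub_aux_transfer_done for the firmware notification; for successful AUX
 * reads the reply payload is copied back into the caller's aux_payload.
 */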
11592 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11593 	unsigned int link_index, void *cmd_payload, void *operation_result)
11594 {
11595 	struct amdgpu_device *adev = ctx->driver_context;
11596 	int ret = 0;
11597 
11598 	if (is_cmd_aux) {
11599 		dc_process_dmub_aux_transfer_async(ctx->dc,
11600 			link_index, (struct aux_payload *)cmd_payload);
11601 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11602 					(struct set_config_cmd_payload *)cmd_payload,
11603 					adev->dm.dmub_notify)) {
11604 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11605 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11606 					(uint32_t *)operation_result);
11607 	}
11608 
11609 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11610 	if (ret == 0) {
11611 		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
11612 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11613 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11614 				(uint32_t *)operation_result);
11615 	}
11616 
11617 	if (is_cmd_aux) {
11618 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11619 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11620 
11621 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11622 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11623 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11624 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11625 				       adev->dm.dmub_notify->aux_reply.length);
11626 			}
11627 		}
11628 	}
11629 
11630 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11631 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11632 			(uint32_t *)operation_result);
11633 }
11634