xref: /linux/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c (revision 16280ded45fba1216d1d4c6acfc20c2d5b45ef50)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/pci.h>
25 
26 #include <drm/drm_edid.h>
27 #include <drm/drm_fourcc.h>
28 #include <drm/drm_modeset_helper.h>
29 #include <drm/drm_modeset_helper_vtables.h>
30 #include <drm/drm_vblank.h>
31 
32 #include "amdgpu.h"
33 #include "amdgpu_pm.h"
34 #include "amdgpu_i2c.h"
35 #include "atom.h"
36 #include "amdgpu_atombios.h"
37 #include "atombios_crtc.h"
38 #include "atombios_encoders.h"
39 #include "amdgpu_pll.h"
40 #include "amdgpu_connectors.h"
41 #include "amdgpu_display.h"
42 
43 #include "bif/bif_3_0_d.h"
44 #include "bif/bif_3_0_sh_mask.h"
45 #include "oss/oss_1_0_d.h"
46 #include "oss/oss_1_0_sh_mask.h"
47 #include "gca/gfx_6_0_d.h"
48 #include "gca/gfx_6_0_sh_mask.h"
49 #include "gmc/gmc_6_0_d.h"
50 #include "gmc/gmc_6_0_sh_mask.h"
51 #include "dce/dce_6_0_d.h"
52 #include "dce/dce_6_0_sh_mask.h"
53 #include "gca/gfx_7_2_enum.h"
54 #include "dce_v6_0.h"
55 #include "si_enums.h"
56 
57 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
58 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
59 
60 static const u32 crtc_offsets[6] =
61 {
62 	SI_CRTC0_REGISTER_OFFSET,
63 	SI_CRTC1_REGISTER_OFFSET,
64 	SI_CRTC2_REGISTER_OFFSET,
65 	SI_CRTC3_REGISTER_OFFSET,
66 	SI_CRTC4_REGISTER_OFFSET,
67 	SI_CRTC5_REGISTER_OFFSET
68 };
69 
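/*
 * Per-pin HPD register offsets, relative to the HPD1 block; e.g.
 * RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) reads the interrupt
 * status register of hot plug pin 'hpd'.
 */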
70 static const u32 hpd_offsets[] =
71 {
72 	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
73 	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
74 	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
75 	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
76 	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
77 	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
78 };
79 
80 static const uint32_t dig_offsets[] = {
81 	SI_CRTC0_REGISTER_OFFSET,
82 	SI_CRTC1_REGISTER_OFFSET,
83 	SI_CRTC2_REGISTER_OFFSET,
84 	SI_CRTC3_REGISTER_OFFSET,
85 	SI_CRTC4_REGISTER_OFFSET,
86 	SI_CRTC5_REGISTER_OFFSET,
87 	(0x13830 - 0x7030) >> 2,
88 };
89 
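/*
 * For each display controller: the DISP_INTERRUPT_STATUS* register that
 * carries its status bits, and the vblank/vline/hpd masks within it.
 */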
90 static const struct {
91 	uint32_t	reg;
92 	uint32_t	vblank;
93 	uint32_t	vline;
94 	uint32_t	hpd;
95 
96 } interrupt_status_offsets[6] = { {
97 	.reg = mmDISP_INTERRUPT_STATUS,
98 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
99 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
100 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
101 }, {
102 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
103 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
104 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
105 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
106 }, {
107 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
108 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
109 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
110 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
111 }, {
112 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
113 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
114 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
115 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
116 }, {
117 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
118 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
119 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
120 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
121 }, {
122 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
123 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
124 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
125 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
126 } };
127 
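/*
 * The AZALIA endpoint registers are accessed indirectly: write the
 * register index to AZALIA_F0_CODEC_ENDPOINT_INDEX, then read or write
 * AZALIA_F0_CODEC_ENDPOINT_DATA.  audio_endpt_idx_lock serializes use
 * of the shared INDEX/DATA pair.
 */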
128 static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
129 				     u32 block_offset, u32 reg)
130 {
131 	unsigned long flags;
132 	u32 r;
133 
134 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
135 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
136 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
137 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
138 
139 	return r;
140 }
141 
142 static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
143 				      u32 block_offset, u32 reg, u32 v)
144 {
145 	unsigned long flags;
146 
147 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
148 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
149 		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
150 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
151 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
152 }
153 
154 static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
155 {
156 	if (crtc >= adev->mode_info.num_crtc)
157 		return 0;
158 	else
159 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
160 }
161 
162 static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
163 {
164 	unsigned i;
165 
166 	/* Enable pflip interrupts */
167 	for (i = 0; i < adev->mode_info.num_crtc; i++)
168 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
169 }
170 
171 static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
172 {
173 	unsigned i;
174 
175 	/* Disable pflip interrupts */
176 	for (i = 0; i < adev->mode_info.num_crtc; i++)
177 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
178 }
179 
180 /**
181  * dce_v6_0_page_flip - pageflip callback.
182  *
183  * @adev: amdgpu_device pointer
184  * @crtc_id: crtc to flip on
185  * @crtc_base: new address of the crtc (GPU MC address)
186  * @async: asynchronous flip
187  *
188  * Does the actual pageflip (evergreen+).
189  * Programs the new scanout address; the write to the low surface
190  * address register latches the double buffered update, which the
191  * hardware applies at the next vertical retrace (or at hsync for
192  * async flips).
193  */
194 static void dce_v6_0_page_flip(struct amdgpu_device *adev,
195 			       int crtc_id, u64 crtc_base, bool async)
196 {
197 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
198 	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
199 
200 	/* flip at hsync for async, default is vsync */
201 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
202 	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
203 	/* update pitch */
204 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
205 	       fb->pitches[0] / fb->format->cpp[0]);
206 	/* update the scanout addresses */
207 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
208 	       upper_32_bits(crtc_base));
209 	/* writing to the low address triggers the update */
210 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
211 	       (u32)crtc_base);
212 	/* post the write */
213 	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
214 }
215 
216 static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
217 					u32 *vbl, u32 *position)
218 {
219 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
220 		return -EINVAL;
221 
222 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
223 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
224 
225 	return 0;
226 }
227 
228 /**
229  * dce_v6_0_hpd_sense - hpd sense callback.
230  *
231  * @adev: amdgpu_device pointer
232  * @hpd: hpd (hotplug detect) pin
233  *
234  * Checks if a digital monitor is connected (evergreen+).
235  * Returns true if connected, false if not connected.
236  */
237 static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
238 			       enum amdgpu_hpd_id hpd)
239 {
240 	bool connected = false;
241 
242 	if (hpd >= adev->mode_info.num_hpd)
243 		return connected;
244 
245 	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
246 	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
247 		connected = true;
248 
249 	return connected;
250 }
251 
252 /**
253  * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
254  *
255  * @adev: amdgpu_device pointer
256  * @hpd: hpd (hotplug detect) pin
257  *
258  * Set the polarity of the hpd pin (evergreen+).
259  */
260 static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
261 				      enum amdgpu_hpd_id hpd)
262 {
263 	u32 tmp;
264 	bool connected = dce_v6_0_hpd_sense(adev, hpd);
265 
266 	if (hpd >= adev->mode_info.num_hpd)
267 		return;
268 
269 	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
270 	if (connected)
271 		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
272 	else
273 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
274 	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
275 }
276 
277 static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
278 				 int hpd)
279 {
280 	u32 tmp;
281 
282 	if (hpd >= adev->mode_info.num_hpd) {
283 		DRM_DEBUG("invalid hpd %d\n", hpd);
284 		return;
285 	}
286 
287 	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
288 	tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
289 	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
290 }
291 
292 /**
293  * dce_v6_0_hpd_init - hpd setup callback.
294  *
295  * @adev: amdgpu_device pointer
296  *
297  * Setup the hpd pins used by the card (evergreen+).
298  * Enable the pin, set the polarity, and enable the hpd interrupts.
299  */
300 static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
301 {
302 	struct drm_device *dev = adev_to_drm(adev);
303 	struct drm_connector *connector;
304 	struct drm_connector_list_iter iter;
305 	u32 tmp;
306 
307 	drm_connector_list_iter_begin(dev, &iter);
308 	drm_for_each_connector_iter(connector, &iter) {
309 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
310 
311 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
312 			continue;
313 
314 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
315 		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
316 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
317 
318 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
319 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
320 			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
321 			 * aux dp channel on iMacs and to help (but not completely fix)
322 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143,
323 			 * and also to avoid interrupt storms during dpms.
324 			 */
325 			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
326 			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
327 			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
328 			continue;
329 		}
330 
331 		dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
332 		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
333 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
334 	}
335 	drm_connector_list_iter_end(&iter);
336 }
337 
338 /**
339  * dce_v6_0_hpd_fini - hpd tear down callback.
340  *
341  * @adev: amdgpu_device pointer
342  *
343  * Tear down the hpd pins used by the card (evergreen+).
344  * Disable the hpd interrupts.
345  */
346 static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
347 {
348 	struct drm_device *dev = adev_to_drm(adev);
349 	struct drm_connector *connector;
350 	struct drm_connector_list_iter iter;
351 	u32 tmp;
352 
353 	drm_connector_list_iter_begin(dev, &iter);
354 	drm_for_each_connector_iter(connector, &iter) {
355 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
356 
357 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
358 			continue;
359 
360 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
361 		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
362 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
363 
364 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
365 	}
366 	drm_connector_list_iter_end(&iter);
367 }
368 
369 static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
370 {
371 	return mmDC_GPIO_HPD_A;
372 }
373 
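/*
 * Consider the display engine hung if the HV counter of any enabled
 * CRTC stops advancing for ~1 ms (10 samples, 100 us apart).
 */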
374 static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
375 {
376 	u32 crtc_hung = 0;
377 	u32 crtc_status[6];
378 	u32 i, j, tmp;
379 
380 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
381 		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
382 			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
383 			crtc_hung |= (1 << i);
384 		}
385 	}
386 
387 	for (j = 0; j < 10; j++) {
388 		for (i = 0; i < adev->mode_info.num_crtc; i++) {
389 			if (crtc_hung & (1 << i)) {
390 				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
391 				if (tmp != crtc_status[i])
392 					crtc_hung &= ~(1 << i);
393 			}
394 		}
395 		if (crtc_hung == 0)
396 			return false;
397 		udelay(100);
398 	}
399 
400 	return true;
401 }
402 
403 static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
404 					  bool render)
405 {
406 	if (!render)
407 		WREG32(mmVGA_RENDER_CONTROL,
408 			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
409 }
410 
411 static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
412 {
413 	switch (adev->asic_type) {
414 	case CHIP_TAHITI:
415 	case CHIP_PITCAIRN:
416 	case CHIP_VERDE:
417 		return 6;
418 	case CHIP_OLAND:
419 		return 2;
420 	default:
421 		return 0;
422 	}
423 }
424 
425 void dce_v6_0_disable_dce(struct amdgpu_device *adev)
426 {
427 	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
428 	if (amdgpu_atombios_has_dce_engine_info(adev)) {
429 		u32 tmp;
430 		int crtc_enabled, i;
431 
432 		dce_v6_0_set_vga_render_state(adev, false);
433 
434 		/* Disable the CRTCs */
435 		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
436 			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
437 				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
438 			if (crtc_enabled) {
439 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
440 				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
441 				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
442 				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
443 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
444 			}
445 		}
446 	}
447 }
448 
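/*
 * Program the FMT block of the CRTC feeding this encoder: truncate or
 * spatially dither the pipe output down to the monitor's bit depth
 * (6 or 8 bpc).  10 bpc and above needs no conversion, and LVDS/LCD
 * panels are configured by the atom tables instead.
 */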
449 static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
450 {
451 	struct drm_device *dev = encoder->dev;
452 	struct amdgpu_device *adev = drm_to_adev(dev);
453 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
454 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
455 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
456 	int bpc = 0;
457 	u32 tmp = 0;
458 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
459 
460 	if (connector) {
461 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
462 		bpc = amdgpu_connector_get_monitor_bpc(connector);
463 		dither = amdgpu_connector->dither;
464 	}
465 
466 	/* LVDS FMT is set up by atom */
467 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
468 		return;
469 
470 	if (bpc == 0)
471 		return;
472 
474 	switch (bpc) {
475 	case 6:
476 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
477 			/* XXX sort out optimal dither settings */
478 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
479 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
480 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
481 		else
482 			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
483 		break;
484 	case 8:
485 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
486 			/* XXX sort out optimal dither settings */
487 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
488 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
489 				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
490 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
491 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
492 		else
493 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
494 				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
495 		break;
496 	case 10:
497 	default:
498 		/* not needed */
499 		break;
500 	}
501 
502 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
503 }
504 
505 /**
506  * si_get_number_of_dram_channels - get the number of dram channels
507  *
508  * @adev: amdgpu_device pointer
509  *
510  * Look up the number of video ram channels (SI).
511  * Used for display watermark bandwidth calculations
512  * Returns the number of dram channels
513  */
514 static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
515 {
516 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
517 
518 	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
519 	case 0:
520 	default:
521 		return 1;
522 	case 1:
523 		return 2;
524 	case 2:
525 		return 4;
526 	case 3:
527 		return 8;
528 	case 4:
529 		return 3;
530 	case 5:
531 		return 6;
532 	case 6:
533 		return 10;
534 	case 7:
535 		return 12;
536 	case 8:
537 		return 16;
538 	}
539 }
540 
541 struct dce6_wm_params {
542 	u32 dram_channels; /* number of dram channels */
543 	u32 yclk;          /* bandwidth per dram data pin in kHz */
544 	u32 sclk;          /* engine clock in kHz */
545 	u32 disp_clk;      /* display clock in kHz */
546 	u32 src_width;     /* viewport width */
547 	u32 active_time;   /* active display time in ns */
548 	u32 blank_time;    /* blank time in ns */
549 	bool interlaced;    /* mode is interlaced */
550 	fixed20_12 vsc;    /* vertical scale ratio */
551 	u32 num_heads;     /* number of active crtcs */
552 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
553 	u32 lb_size;       /* line buffer allocated to pipe */
554 	u32 vtaps;         /* vertical scaler taps */
555 };
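
/*
 * The bandwidth helpers below do their math in 20.12 fixed point
 * (fixed20_12, dfixed_const(), dfixed_mul(), dfixed_div() and
 * dfixed_trunc() from <drm/drm_fixed.h>), since floating point is not
 * available in the kernel.
 */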
556 
557 /**
558  * dce_v6_0_dram_bandwidth - get the dram bandwidth
559  *
560  * @wm: watermark calculation data
561  *
562  * Calculate the raw dram bandwidth (SI).
563  * Used for display watermark bandwidth calculations
564  * Returns the dram bandwidth in MBytes/s
565  */
566 static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
567 {
568 	/* Calculate raw DRAM Bandwidth */
569 	fixed20_12 dram_efficiency; /* 0.7 */
570 	fixed20_12 yclk, dram_channels, bandwidth;
571 	fixed20_12 a;
572 
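	/*
	 * Worked example (illustrative numbers): yclk = 1000000 kHz and two
	 * DRAM channels give (1000000 / 1000) * (2 * 4) * 0.7 = 5600 MBytes/s.
	 */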
573 	a.full = dfixed_const(1000);
574 	yclk.full = dfixed_const(wm->yclk);
575 	yclk.full = dfixed_div(yclk, a);
576 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
577 	a.full = dfixed_const(10);
578 	dram_efficiency.full = dfixed_const(7);
579 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
580 	bandwidth.full = dfixed_mul(dram_channels, yclk);
581 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
582 
583 	return dfixed_trunc(bandwidth);
584 }
585 
586 /**
587  * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
588  *
589  * @wm: watermark calculation data
590  *
591  * Calculate the dram bandwidth used for display (SI).
592  * Used for display watermark bandwidth calculations
593  * Returns the dram bandwidth for display in MBytes/s
594  */
595 static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
596 {
597 	/* Calculate DRAM Bandwidth and the part allocated to display. */
598 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
599 	fixed20_12 yclk, dram_channels, bandwidth;
600 	fixed20_12 a;
601 
602 	a.full = dfixed_const(1000);
603 	yclk.full = dfixed_const(wm->yclk);
604 	yclk.full = dfixed_div(yclk, a);
605 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
606 	a.full = dfixed_const(10);
607 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
608 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
609 	bandwidth.full = dfixed_mul(dram_channels, yclk);
610 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
611 
612 	return dfixed_trunc(bandwidth);
613 }
614 
615 /**
616  * dce_v6_0_data_return_bandwidth - get the data return bandwidth
617  *
618  * @wm: watermark calculation data
619  *
620  * Calculate the data return bandwidth used for display (SI).
621  * Used for display watermark bandwidth calculations
622  * Returns the data return bandwidth in MBytes/s
623  */
624 static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
625 {
626 	/* Calculate the display Data return Bandwidth */
627 	fixed20_12 return_efficiency; /* 0.8 */
628 	fixed20_12 sclk, bandwidth;
629 	fixed20_12 a;
630 
631 	a.full = dfixed_const(1000);
632 	sclk.full = dfixed_const(wm->sclk);
633 	sclk.full = dfixed_div(sclk, a);
634 	a.full = dfixed_const(10);
635 	return_efficiency.full = dfixed_const(8);
636 	return_efficiency.full = dfixed_div(return_efficiency, a);
637 	a.full = dfixed_const(32);
638 	bandwidth.full = dfixed_mul(a, sclk);
639 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
640 
641 	return dfixed_trunc(bandwidth);
642 }
643 
644 /**
645  * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
646  *
647  * @wm: watermark calculation data
648  *
649  * Calculate the dmif bandwidth used for display (SI).
650  * Used for display watermark bandwidth calculations
651  * Returns the dmif bandwidth in MBytes/s
652  */
653 static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
654 {
655 	/* Calculate the DMIF Request Bandwidth */
656 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
657 	fixed20_12 disp_clk, bandwidth;
658 	fixed20_12 a, b;
659 
660 	a.full = dfixed_const(1000);
661 	disp_clk.full = dfixed_const(wm->disp_clk);
662 	disp_clk.full = dfixed_div(disp_clk, a);
663 	a.full = dfixed_const(32);
664 	b.full = dfixed_mul(a, disp_clk);
665 
666 	a.full = dfixed_const(10);
667 	disp_clk_request_efficiency.full = dfixed_const(8);
668 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
669 
670 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
671 
672 	return dfixed_trunc(bandwidth);
673 }
674 
675 /**
676  * dce_v6_0_available_bandwidth - get the min available bandwidth
677  *
678  * @wm: watermark calculation data
679  *
680  * Calculate the min available bandwidth used for display (SI).
681  * Used for display watermark bandwidth calculations
682  * Returns the min available bandwidth in MBytes/s
683  */
684 static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
685 {
686 	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
687 	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
688 	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
689 	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
690 
691 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
692 }
693 
694 /**
695  * dce_v6_0_average_bandwidth - get the average available bandwidth
696  *
697  * @wm: watermark calculation data
698  *
699  * Calculate the average available bandwidth used for display (SI).
700  * Used for display watermark bandwidth calculations
701  * Returns the average available bandwidth in MBytes/s
702  */
703 static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
704 {
705 	/* Calculate the display mode Average Bandwidth
706 	 * DisplayMode should contain the source and destination dimensions,
707 	 * timing, etc.
708 	 */
709 	fixed20_12 bpp;
710 	fixed20_12 line_time;
711 	fixed20_12 src_width;
712 	fixed20_12 bandwidth;
713 	fixed20_12 a;
714 
715 	a.full = dfixed_const(1000);
716 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
717 	line_time.full = dfixed_div(line_time, a);
718 	bpp.full = dfixed_const(wm->bytes_per_pixel);
719 	src_width.full = dfixed_const(wm->src_width);
720 	bandwidth.full = dfixed_mul(src_width, bpp);
721 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
722 	bandwidth.full = dfixed_div(bandwidth, line_time);
723 
724 	return dfixed_trunc(bandwidth);
725 }
726 
727 /**
728  * dce_v6_0_latency_watermark - get the latency watermark
729  *
730  * @wm: watermark calculation data
731  *
732  * Calculate the latency watermark (SI).
733  * Used for display watermark bandwidth calculations
734  * Returns the latency watermark in ns
735  */
736 static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
737 {
738 	/* First calculate the latency in ns */
739 	u32 mc_latency = 2000; /* 2000 ns. */
740 	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
741 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
742 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
743 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
744 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
745 		(wm->num_heads * cursor_line_pair_return_time);
746 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
747 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
748 	u32 tmp, dmif_size = 12288;
749 	fixed20_12 a, b, c;
750 
751 	if (wm->num_heads == 0)
752 		return 0;
753 
754 	a.full = dfixed_const(2);
755 	b.full = dfixed_const(1);
756 	if ((wm->vsc.full > a.full) ||
757 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
758 	    (wm->vtaps >= 5) ||
759 	    ((wm->vsc.full >= a.full) && wm->interlaced))
760 		max_src_lines_per_dst_line = 4;
761 	else
762 		max_src_lines_per_dst_line = 2;
763 
764 	a.full = dfixed_const(available_bandwidth);
765 	b.full = dfixed_const(wm->num_heads);
766 	a.full = dfixed_div(a, b);
767 	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
768 	tmp = min(dfixed_trunc(a), tmp);
769 
770 	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
771 
772 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
773 	b.full = dfixed_const(1000);
774 	c.full = dfixed_const(lb_fill_bw);
775 	b.full = dfixed_div(c, b);
776 	a.full = dfixed_div(a, b);
777 	line_fill_time = dfixed_trunc(a);
778 
779 	if (line_fill_time < wm->active_time)
780 		return latency;
781 	else
782 		return latency + (line_fill_time - wm->active_time);
784 }
785 
786 /**
787  * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
788  * average and available dram bandwidth
789  *
790  * @wm: watermark calculation data
791  *
792  * Check if the display average bandwidth fits in the display
793  * dram bandwidth (SI).
794  * Used for display watermark bandwidth calculations
795  * Returns true if the display fits, false if not.
796  */
797 static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
798 {
799 	if (dce_v6_0_average_bandwidth(wm) <=
800 	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
801 		return true;
802 	else
803 		return false;
804 }
805 
806 /**
807  * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
808  * average and available bandwidth
809  *
810  * @wm: watermark calculation data
811  *
812  * Check if the display average bandwidth fits in the display
813  * available bandwidth (SI).
814  * Used for display watermark bandwidth calculations
815  * Returns true if the display fits, false if not.
816  */
817 static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
818 {
819 	if (dce_v6_0_average_bandwidth(wm) <=
820 	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
821 		return true;
822 	else
823 		return false;
824 }
825 
826 /**
827  * dce_v6_0_check_latency_hiding - check latency hiding
828  *
829  * @wm: watermark calculation data
830  *
831  * Check latency hiding (SI).
832  * Used for display watermark bandwidth calculations
833  * Returns true if the display fits, false if not.
834  */
835 static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
836 {
837 	u32 lb_partitions = wm->lb_size / wm->src_width;
838 	u32 line_time = wm->active_time + wm->blank_time;
839 	u32 latency_tolerant_lines;
840 	u32 latency_hiding;
841 	fixed20_12 a;
842 
843 	a.full = dfixed_const(1);
844 	if (wm->vsc.full > a.full)
845 		latency_tolerant_lines = 1;
846 	else {
847 		if (lb_partitions <= (wm->vtaps + 1))
848 			latency_tolerant_lines = 1;
849 		else
850 			latency_tolerant_lines = 2;
851 	}
852 
853 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
854 
855 	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
856 		return true;
857 	else
858 		return false;
859 }
860 
861 /**
862  * dce_v6_0_program_watermarks - program display watermarks
863  *
864  * @adev: amdgpu_device pointer
865  * @amdgpu_crtc: the selected display controller
866  * @lb_size: line buffer size
867  * @num_heads: number of display controllers in use
868  *
869  * Calculate and program the display watermarks for the
870  * selected display controller (SI).
871  */
872 static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
873 					struct amdgpu_crtc *amdgpu_crtc,
874 					u32 lb_size, u32 num_heads)
875 {
876 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
877 	struct dce6_wm_params wm_low, wm_high;
878 	u32 dram_channels;
879 	u32 active_time;
880 	u32 line_time = 0;
881 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
882 	u32 priority_a_mark = 0, priority_b_mark = 0;
883 	u32 priority_a_cnt = PRIORITY_OFF;
884 	u32 priority_b_cnt = PRIORITY_OFF;
885 	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
886 	fixed20_12 a, b, c;
887 
888 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
889 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
890 					    (u32)mode->clock);
891 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
892 					  (u32)mode->clock);
893 		line_time = min_t(u32, line_time, 65535);
894 		priority_a_cnt = 0;
895 		priority_b_cnt = 0;
896 
897 		dram_channels = si_get_number_of_dram_channels(adev);
898 
899 		/* watermark for high clocks */
900 		if (adev->pm.dpm_enabled) {
901 			wm_high.yclk =
902 				amdgpu_dpm_get_mclk(adev, false) * 10;
903 			wm_high.sclk =
904 				amdgpu_dpm_get_sclk(adev, false) * 10;
905 		} else {
906 			wm_high.yclk = adev->pm.current_mclk * 10;
907 			wm_high.sclk = adev->pm.current_sclk * 10;
908 		}
909 
910 		wm_high.disp_clk = mode->clock;
911 		wm_high.src_width = mode->crtc_hdisplay;
912 		wm_high.active_time = active_time;
913 		wm_high.blank_time = line_time - wm_high.active_time;
914 		wm_high.interlaced = false;
915 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
916 			wm_high.interlaced = true;
917 		wm_high.vsc = amdgpu_crtc->vsc;
918 		wm_high.vtaps = 1;
919 		if (amdgpu_crtc->rmx_type != RMX_OFF)
920 			wm_high.vtaps = 2;
921 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
922 		wm_high.lb_size = lb_size;
923 		wm_high.dram_channels = dram_channels;
924 		wm_high.num_heads = num_heads;
925 
926 		/* watermark for low clocks */
927 		if (adev->pm.dpm_enabled) {
928 			wm_low.yclk =
929 				amdgpu_dpm_get_mclk(adev, true) * 10;
930 			wm_low.sclk =
931 				amdgpu_dpm_get_sclk(adev, true) * 10;
932 		} else {
933 			wm_low.yclk = adev->pm.current_mclk * 10;
934 			wm_low.sclk = adev->pm.current_sclk * 10;
935 		}
936 
937 		wm_low.disp_clk = mode->clock;
938 		wm_low.src_width = mode->crtc_hdisplay;
939 		wm_low.active_time = active_time;
940 		wm_low.blank_time = line_time - wm_low.active_time;
941 		wm_low.interlaced = false;
942 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
943 			wm_low.interlaced = true;
944 		wm_low.vsc = amdgpu_crtc->vsc;
945 		wm_low.vtaps = 1;
946 		if (amdgpu_crtc->rmx_type != RMX_OFF)
947 			wm_low.vtaps = 2;
948 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
949 		wm_low.lb_size = lb_size;
950 		wm_low.dram_channels = dram_channels;
951 		wm_low.num_heads = num_heads;
952 
953 		/* set for high clocks */
954 		latency_watermark_a = min_t(u32, dce_v6_0_latency_watermark(&wm_high), 65535);
955 		/* set for low clocks */
956 		latency_watermark_b = min_t(u32, dce_v6_0_latency_watermark(&wm_low), 65535);
957 
958 		/* possibly force display priority to high */
959 		/* should really do this at mode validation time... */
960 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
961 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
962 		    !dce_v6_0_check_latency_hiding(&wm_high) ||
963 		    (adev->mode_info.disp_priority == 2)) {
964 			DRM_DEBUG_KMS("force priority to high\n");
965 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
966 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
967 		}
968 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
969 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
970 		    !dce_v6_0_check_latency_hiding(&wm_low) ||
971 		    (adev->mode_info.disp_priority == 2)) {
972 			DRM_DEBUG_KMS("force priority to high\n");
973 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
974 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
975 		}
976 
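		/*
		 * The priority mark is roughly the number of pixels scanned
		 * out during the latency watermark interval, scaled by the
		 * horizontal scale ratio and expressed in units of 16 pixels.
		 */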
977 		a.full = dfixed_const(1000);
978 		b.full = dfixed_const(mode->clock);
979 		b.full = dfixed_div(b, a);
980 		c.full = dfixed_const(latency_watermark_a);
981 		c.full = dfixed_mul(c, b);
982 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
983 		c.full = dfixed_div(c, a);
984 		a.full = dfixed_const(16);
985 		c.full = dfixed_div(c, a);
986 		priority_a_mark = dfixed_trunc(c);
987 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
988 
989 		a.full = dfixed_const(1000);
990 		b.full = dfixed_const(mode->clock);
991 		b.full = dfixed_div(b, a);
992 		c.full = dfixed_const(latency_watermark_b);
993 		c.full = dfixed_mul(c, b);
994 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
995 		c.full = dfixed_div(c, a);
996 		a.full = dfixed_const(16);
997 		c.full = dfixed_div(c, a);
998 		priority_b_mark = dfixed_trunc(c);
999 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
1000 
1001 		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1002 	}
1003 
1004 	/* select wm A */
1005 	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1006 	tmp = arb_control3;
1007 	tmp &= ~LATENCY_WATERMARK_MASK(3);
1008 	tmp |= LATENCY_WATERMARK_MASK(1);
1009 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1010 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1011 	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
1012 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1013 	/* select wm B */
1014 	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
1015 	tmp &= ~LATENCY_WATERMARK_MASK(3);
1016 	tmp |= LATENCY_WATERMARK_MASK(2);
1017 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
1018 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1019 	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1020 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1021 	/* restore original selection */
1022 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
1023 
1024 	/* write the priority marks */
1025 	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
1026 	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
1027 
1028 	/* save values for DPM */
1029 	amdgpu_crtc->line_time = line_time;
1030 	amdgpu_crtc->wm_high = latency_watermark_a;
1031 
1032 	/* Save number of lines the linebuffer leads before the scanout */
1033 	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1034 }
1035 
1036 /* watermark setup */
1037 /**
1038  * dce_v6_0_line_buffer_adjust - Set up the line buffer
1039  *
1040  * @adev: amdgpu_device pointer
1041  * @amdgpu_crtc: the selected display controller
1042  * @mode: the current display mode on the selected display
1043  * controller
1044  * @other_mode: the display mode of another display controller
1045  *              that may be sharing the line buffer
1046  *
1047  * Set up the line buffer allocation for
1048  * the selected display controller (SI).
1049  * Returns the line buffer size in pixels.
1050  */
1051 static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
1052 				   struct amdgpu_crtc *amdgpu_crtc,
1053 				   struct drm_display_mode *mode,
1054 				   struct drm_display_mode *other_mode)
1055 {
1056 	u32 tmp, buffer_alloc, i;
1057 	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
1058 	/*
1059 	 * Line Buffer Setup
1060 	 * There are 3 line buffers, each one shared by 2 display controllers.
1061 	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1062 	 * the display controllers.  The partitioning is done via one of four
1063 	 * preset allocations specified in bits 21:20:
1064 	 *  0 - half lb
1065 	 *  2 - whole lb, other crtc must be disabled
1066 	 */
1067 	/* this can get tricky if we have two large displays on a paired group
1068 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1069 	 * non-linked crtcs for maximum line buffer allocation.
1070 	 */
1071 	if (amdgpu_crtc->base.enabled && mode) {
1072 		if (other_mode) {
1073 			tmp = 0; /* 1/2 */
1074 			buffer_alloc = 1;
1075 		} else {
1076 			tmp = 2; /* whole */
1077 			buffer_alloc = 2;
1078 		}
1079 	} else {
1080 		tmp = 0;
1081 		buffer_alloc = 0;
1082 	}
1083 
1084 	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1085 	       DC_LB_MEMORY_CONFIG(tmp));
1086 
1087 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1088 	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1089 	for (i = 0; i < adev->usec_timeout; i++) {
1090 		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1091 		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1092 			break;
1093 		udelay(1);
1094 	}
1095 
1096 	if (amdgpu_crtc->base.enabled && mode) {
1097 		switch (tmp) {
1098 		case 0:
1099 		default:
1100 			return 4096 * 2;
1101 		case 2:
1102 			return 8192 * 2;
1103 		}
1104 	}
1105 
1106 	/* controller not enabled, so no lb used */
1107 	return 0;
1108 }
1109 
1111 /**
1112  * dce_v6_0_bandwidth_update - program display watermarks
1113  *
1114  * @adev: amdgpu_device pointer
1115  *
1116  * Calculate and program the display watermarks and line
1117  * buffer allocation (SI).
1118  */
1119 static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1120 {
1121 	struct drm_display_mode *mode0 = NULL;
1122 	struct drm_display_mode *mode1 = NULL;
1123 	u32 num_heads = 0, lb_size;
1124 	int i;
1125 
1126 	if (!adev->mode_info.mode_config_initialized)
1127 		return;
1128 
1129 	amdgpu_display_update_priority(adev);
1130 
1131 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1132 		if (adev->mode_info.crtcs[i]->base.enabled)
1133 			num_heads++;
1134 	}
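	/* crtcs i and i+1 share a line buffer, so size and program them as a pair */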
1135 	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1136 		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1137 		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1138 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1139 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1140 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1141 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1142 	}
1143 }
1144 
1145 static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1146 {
1147 	int i;
1148 	u32 tmp;
1149 
1150 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1151 		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
1152 				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1153 		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
1154 					PORT_CONNECTIVITY))
1155 			adev->mode_info.audio.pin[i].connected = false;
1156 		else
1157 			adev->mode_info.audio.pin[i].connected = true;
1158 	}
1160 }
1161 
1162 static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1163 {
1164 	int i;
1165 
1166 	dce_v6_0_audio_get_connected_pins(adev);
1167 
1168 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1169 		if (adev->mode_info.audio.pin[i].connected)
1170 			return &adev->mode_info.audio.pin[i];
1171 	}
1172 	DRM_ERROR("No connected audio pins found!\n");
1173 	return NULL;
1174 }
1175 
1176 static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
1177 {
1178 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1179 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1180 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1181 
1182 	if (!dig || !dig->afmt || !dig->afmt->pin)
1183 		return;
1184 
1185 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
1186 	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
1187 		             dig->afmt->pin->id));
1188 }
1189 
1190 static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1191 						struct drm_display_mode *mode)
1192 {
1193 	struct drm_device *dev = encoder->dev;
1194 	struct amdgpu_device *adev = drm_to_adev(dev);
1195 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1196 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1197 	struct drm_connector *connector;
1198 	struct drm_connector_list_iter iter;
1199 	struct amdgpu_connector *amdgpu_connector = NULL;
1200 	int interlace = 0;
1201 	u32 tmp;
1202 
1203 	drm_connector_list_iter_begin(dev, &iter);
1204 	drm_for_each_connector_iter(connector, &iter) {
1205 		if (connector->encoder == encoder) {
1206 			amdgpu_connector = to_amdgpu_connector(connector);
1207 			break;
1208 		}
1209 	}
1210 	drm_connector_list_iter_end(&iter);
1211 
1212 	if (!amdgpu_connector) {
1213 		DRM_ERROR("Couldn't find encoder's connector\n");
1214 		return;
1215 	}
1216 
1217 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1218 		interlace = 1;
1219 
1220 	if (connector->latency_present[interlace]) {
1221 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1222 				VIDEO_LIPSYNC, connector->video_latency[interlace]);
1223 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1224 				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1225 	} else {
1226 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1227 				VIDEO_LIPSYNC, 0);
1228 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1229 				AUDIO_LIPSYNC, 0);
1230 	}
1231 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1232 			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1233 }
1234 
1235 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1236 {
1237 	struct drm_device *dev = encoder->dev;
1238 	struct amdgpu_device *adev = drm_to_adev(dev);
1239 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1240 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1241 	struct drm_connector *connector;
1242 	struct drm_connector_list_iter iter;
1243 	struct amdgpu_connector *amdgpu_connector = NULL;
1244 	u8 *sadb = NULL;
1245 	int sad_count;
1246 	u32 tmp;
1247 
1248 	drm_connector_list_iter_begin(dev, &iter);
1249 	drm_for_each_connector_iter(connector, &iter) {
1250 		if (connector->encoder == encoder) {
1251 			amdgpu_connector = to_amdgpu_connector(connector);
1252 			break;
1253 		}
1254 	}
1255 	drm_connector_list_iter_end(&iter);
1256 
1257 	if (!amdgpu_connector) {
1258 		DRM_ERROR("Couldn't find encoder's connector\n");
1259 		return;
1260 	}
1261 
1262 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
1263 	if (sad_count < 0) {
1264 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1265 		sad_count = 0;
1266 	}
1267 
1268 	/* program the speaker allocation */
1269 	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1270 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1271 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1272 			HDMI_CONNECTION, 0);
1273 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1274 			DP_CONNECTION, 0);
1275 
1276 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
1277 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1278 				DP_CONNECTION, 1);
1279 	else
1280 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1281 				HDMI_CONNECTION, 1);
1282 
1283 	if (sad_count)
1284 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1285 				SPEAKER_ALLOCATION, sadb[0]);
1286 	else
1287 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1288 				SPEAKER_ALLOCATION, 5); /* stereo */
1289 
1290 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1291 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1292 
1293 	kfree(sadb);
1294 }
1295 
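/*
 * Translate the EDID short audio descriptors (SADs) into the AZALIA
 * per-format audio descriptor registers: for each coding type program
 * the channel count, byte 2 and supported frequencies from the matching
 * SAD, and collect the PCM stereo sample rate mask separately.
 */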
1296 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1297 {
1298 	struct drm_device *dev = encoder->dev;
1299 	struct amdgpu_device *adev = drm_to_adev(dev);
1300 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1301 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1302 	struct drm_connector *connector;
1303 	struct drm_connector_list_iter iter;
1304 	struct amdgpu_connector *amdgpu_connector = NULL;
1305 	struct cea_sad *sads;
1306 	int i, sad_count;
1307 
1308 	static const u16 eld_reg_to_type[][2] = {
1309 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1310 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1311 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1312 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1313 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1314 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1315 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1316 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1317 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1318 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1319 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1320 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1321 	};
1322 
1323 	drm_connector_list_iter_begin(dev, &iter);
1324 	drm_for_each_connector_iter(connector, &iter) {
1325 		if (connector->encoder == encoder) {
1326 			amdgpu_connector = to_amdgpu_connector(connector);
1327 			break;
1328 		}
1329 	}
1330 	drm_connector_list_iter_end(&iter);
1331 
1332 	if (!amdgpu_connector) {
1333 		DRM_ERROR("Couldn't find encoder's connector\n");
1334 		return;
1335 	}
1336 
1337 	sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
1338 	if (sad_count < 0)
1339 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1340 	if (sad_count <= 0)
1341 		return;
1342 
1343 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1344 		u32 tmp = 0;
1345 		u8 stereo_freqs = 0;
1346 		int max_channels = -1;
1347 		int j;
1348 
1349 		for (j = 0; j < sad_count; j++) {
1350 			struct cea_sad *sad = &sads[j];
1351 
1352 			if (sad->format == eld_reg_to_type[i][1]) {
1353 				if (sad->channels > max_channels) {
1354 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1355 							MAX_CHANNELS, sad->channels);
1356 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1357 							DESCRIPTOR_BYTE_2, sad->byte2);
1358 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1359 							SUPPORTED_FREQUENCIES, sad->freq);
1360 					max_channels = sad->channels;
1361 				}
1362 
1363 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1364 					stereo_freqs |= sad->freq;
1365 				else
1366 					break;
1367 			}
1368 		}
1369 
1370 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1371 				SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1372 		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1373 	}
1374 
1375 	kfree(sads);
1377 }
1378 
1379 static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1380 				  struct amdgpu_audio_pin *pin,
1381 				  bool enable)
1382 {
1383 	if (!pin)
1384 		return;
1385 
1386 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1387 			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1388 }
1389 
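/* register offsets of the seven audio endpoint (pin) blocks, relative to pin 0 */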
1390 static const u32 pin_offsets[7] =
1391 {
1392 	(0x1780 - 0x1780),
1393 	(0x1786 - 0x1780),
1394 	(0x178c - 0x1780),
1395 	(0x1792 - 0x1780),
1396 	(0x1798 - 0x1780),
1397 	(0x179d - 0x1780),
1398 	(0x17a4 - 0x1780),
1399 };
1400 
1401 static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1402 {
1403 	int i;
1404 
1405 	if (!amdgpu_audio)
1406 		return 0;
1407 
1408 	adev->mode_info.audio.enabled = true;
1409 
1410 	switch (adev->asic_type) {
1411 	case CHIP_TAHITI:
1412 	case CHIP_PITCAIRN:
1413 	case CHIP_VERDE:
1414 	default:
1415 		adev->mode_info.audio.num_pins = 6;
1416 		break;
1417 	case CHIP_OLAND:
1418 		adev->mode_info.audio.num_pins = 2;
1419 		break;
1420 	}
1421 
1422 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1423 		adev->mode_info.audio.pin[i].channels = -1;
1424 		adev->mode_info.audio.pin[i].rate = -1;
1425 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1426 		adev->mode_info.audio.pin[i].status_bits = 0;
1427 		adev->mode_info.audio.pin[i].category_code = 0;
1428 		adev->mode_info.audio.pin[i].connected = false;
1429 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1430 		adev->mode_info.audio.pin[i].id = i;
1431 		/* disable audio.  it will be set up later */
1432 		/* XXX remove once we switch to ip funcs */
1433 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1434 	}
1435 
1436 	return 0;
1437 }
1438 
1439 static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1440 {
1441 	int i;
1442 
1443 	if (!amdgpu_audio)
1444 		return;
1445 
1446 	if (!adev->mode_info.audio.enabled)
1447 		return;
1448 
1449 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1450 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1451 
1452 	adev->mode_info.audio.enabled = false;
1453 }
1454 
1455 static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
1456 {
1457 	struct drm_device *dev = encoder->dev;
1458 	struct amdgpu_device *adev = drm_to_adev(dev);
1459 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1460 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1461 	u32 tmp;
1462 
1463 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1464 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1465 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
1466 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
1467 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1468 }
1469 
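/*
 * Program the HDMI audio clock regeneration (ACR) packet values: one
 * CTS/N pair per base audio rate (32, 44.1 and 48 kHz), as computed
 * from the pixel clock by amdgpu_afmt_acr().
 */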
1470 static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
1471 				   uint32_t clock, int bpc)
1472 {
1473 	struct drm_device *dev = encoder->dev;
1474 	struct amdgpu_device *adev = drm_to_adev(dev);
1475 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1476 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1477 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1478 	u32 tmp;
1479 
1480 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1481 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1482 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
1483 			bpc > 8 ? 0 : 1);
1484 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1485 
1486 	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1487 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1488 	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1489 	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1490 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1491 	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1492 
1493 	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1494 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1495 	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1496 	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1497 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1498 	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1499 
1500 	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1501 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1502 	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1503 	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1504 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1505 	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1506 }
1507 
1508 static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
1509 					       struct drm_display_mode *mode)
1510 {
1511 	struct drm_device *dev = encoder->dev;
1512 	struct amdgpu_device *adev = drm_to_adev(dev);
1513 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1514 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1515 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1516 	struct hdmi_avi_infoframe frame;
1517 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1518 	uint8_t *payload = buffer + 3;
1519 	uint8_t *header = buffer;
1520 	ssize_t err;
1521 	u32 tmp;
1522 
1523 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1524 	if (err < 0) {
1525 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1526 		return;
1527 	}
1528 
1529 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1530 	if (err < 0) {
1531 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1532 		return;
1533 	}
1534 
1535 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1536 	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
1537 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1538 	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
1539 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1540 	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
1541 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1542 	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
1543 
1544 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1545 	/* the audio info line just needs to be anything other than 0 */
1546 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
1547 			HDMI_AUDIO_INFO_LINE, 2);
1548 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1549 }
1550 
1551 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1552 {
1553 	struct drm_device *dev = encoder->dev;
1554 	struct amdgpu_device *adev = drm_to_adev(dev);
1555 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1556 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1557 	u32 tmp;
1558 
1559 	/*
1560 	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
1561 	 * Express [24MHz / target pixel clock] as an exact rational
1562 	 * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
1563 	 * numerator and DCCG_AUDIO_DTOx_MODULE is the denominator.
1564 	 */
1565 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1566 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1567 			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
1568 	if (em == ATOM_ENCODER_MODE_HDMI) {
1569 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1570 				DCCG_AUDIO_DTO_SEL, 0);
1571 	} else if (ENCODER_MODE_IS_DP(em)) {
1572 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1573 				DCCG_AUDIO_DTO_SEL, 1);
1574 	}
1575 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
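	/* e.g. a 1080p60 HDMI pixel clock of 148500 kHz gives
	 * PHASE = 24000 and MODULE = 148500, i.e. 24 MHz / 148.5 MHz.
	 */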
1576 	if (em == ATOM_ENCODER_MODE_HDMI) {
1577 		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
1578 		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
1579 	} else if (ENCODER_MODE_IS_DP(em)) {
1580 		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
1581 		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
1582 	}
1583 }
1584 
1585 static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1586 {
1587 	struct drm_device *dev = encoder->dev;
1588 	struct amdgpu_device *adev = drm_to_adev(dev);
1589 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1590 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1591 	u32 tmp;
1592 
1593 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1594 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1595 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1596 
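	/* IEC 60958 channel status: number the channels 1 (left), 2 (right), then 3..8 */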
1597 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1598 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1599 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1600 
1601 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1602 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1603 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1604 
1605 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1606 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1607 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1608 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1609 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1610 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1611 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1612 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1613 
1614 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1615 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1616 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1617 
1618 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1619 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1620 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1621 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1622 
1623 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1624 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1625 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1626 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1627 }
1628 
1629 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1630 {
1631 	struct drm_device *dev = encoder->dev;
1632 	struct amdgpu_device *adev = drm_to_adev(dev);
1633 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1634 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1635 	u32 tmp;
1636 
1637 	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1638 	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1639 	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1640 }
1641 
1642 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1643 {
1644 	struct drm_device *dev = encoder->dev;
1645 	struct amdgpu_device *adev = drm_to_adev(dev);
1646 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1647 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1648 	u32 tmp;
1649 
1650 	if (enable) {
1651 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1652 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1653 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1654 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1655 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1656 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1657 
1658 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1659 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1660 		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1661 
1662 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1663 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1664 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1665 	} else {
1666 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1667 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1668 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1669 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1670 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1671 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1672 
1673 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1674 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1675 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1676 	}
1677 }
1678 
1679 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1680 {
1681 	struct drm_device *dev = encoder->dev;
1682 	struct amdgpu_device *adev = drm_to_adev(dev);
1683 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1684 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1685 	u32 tmp;
1686 
1687 	if (enable) {
1688 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1689 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1690 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1691 
1692 		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1693 		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1694 		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1695 
1696 		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1697 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1698 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1699 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1700 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1701 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1702 	} else {
1703 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1704 	}
1705 }
1706 
1707 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1708 				  struct drm_display_mode *mode)
1709 {
1710 	struct drm_device *dev = encoder->dev;
1711 	struct amdgpu_device *adev = drm_to_adev(dev);
1712 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1713 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1714 	struct drm_connector *connector;
1715 	struct drm_connector_list_iter iter;
1716 	struct amdgpu_connector *amdgpu_connector = NULL;
1717 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1718 	int bpc = 8;
1719 
1720 	if (!dig || !dig->afmt)
1721 		return;
1722 
1723 	drm_connector_list_iter_begin(dev, &iter);
1724 	drm_for_each_connector_iter(connector, &iter) {
1725 		if (connector->encoder == encoder) {
1726 			amdgpu_connector = to_amdgpu_connector(connector);
1727 			break;
1728 		}
1729 	}
1730 	drm_connector_list_iter_end(&iter);
1731 
1732 	if (!amdgpu_connector) {
1733 		DRM_ERROR("Couldn't find encoder's connector\n");
1734 		return;
1735 	}
1736 
1737 	if (!dig->afmt->enabled)
1738 		return;
1739 
1740 	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1741 	if (!dig->afmt->pin)
1742 		return;
1743 
1744 	if (encoder->crtc) {
1745 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1746 		bpc = amdgpu_crtc->bpc;
1747 	}
1748 
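	/*
	 * Programming order: disable and mute audio, program the SADs and
	 * latency fields, set up the audio clock (DTO + ACR for HDMI, DTO
	 * only for DP), configure the packets, pin and AVI infoframe, then
	 * unmute and re-enable audio.
	 */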
1749 	/* disable audio before setting up hw */
1750 	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1751 
1752 	dce_v6_0_audio_set_mute(encoder, true);
1753 	dce_v6_0_audio_write_speaker_allocation(encoder);
1754 	dce_v6_0_audio_write_sad_regs(encoder);
1755 	dce_v6_0_audio_write_latency_fields(encoder, mode);
1756 	if (em == ATOM_ENCODER_MODE_HDMI) {
1757 		dce_v6_0_audio_set_dto(encoder, mode->clock);
1758 		dce_v6_0_audio_set_vbi_packet(encoder);
1759 		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1760 	} else if (ENCODER_MODE_IS_DP(em)) {
1761 		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1762 	}
1763 	dce_v6_0_audio_set_packet(encoder);
1764 	dce_v6_0_audio_select_pin(encoder);
1765 	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1766 	dce_v6_0_audio_set_mute(encoder, false);
1767 	if (em == ATOM_ENCODER_MODE_HDMI) {
1768 		dce_v6_0_audio_hdmi_enable(encoder, true);
1769 	} else if (ENCODER_MODE_IS_DP(em)) {
1770 		dce_v6_0_audio_dp_enable(encoder, true);
1771 	}
1772 
1773 	/* enable audio after setting up hw */
1774 	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1775 }
1776 
1777 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1778 {
1779 	struct drm_device *dev = encoder->dev;
1780 	struct amdgpu_device *adev = drm_to_adev(dev);
1781 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1782 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1783 
1784 	if (!dig || !dig->afmt)
1785 		return;
1786 
1787 	/* Silently ignore a redundant enable request */
1788 	if (enable && dig->afmt->enabled)
1789 		return;
1790 
1791 	if (!enable && !dig->afmt->enabled)
1792 		return;
1793 
1794 	if (!enable && dig->afmt->pin) {
1795 		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1796 		dig->afmt->pin = NULL;
1797 	}
1798 
1799 	dig->afmt->enabled = enable;
1800 
1801 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1802 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1803 }
1804 
1805 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1806 {
1807 	int i, j;
1808 
1809 	for (i = 0; i < adev->mode_info.num_dig; i++)
1810 		adev->mode_info.afmt[i] = NULL;
1811 
1812 	/* DCE6 has audio blocks tied to DIG encoders */
1813 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1814 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1815 		if (adev->mode_info.afmt[i]) {
1816 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1817 			adev->mode_info.afmt[i]->id = i;
1818 		} else {
1819 			for (j = 0; j < i; j++) {
1820 				kfree(adev->mode_info.afmt[j]);
1821 				adev->mode_info.afmt[j] = NULL;
1822 			}
1823 			DRM_ERROR("Out of memory allocating afmt table\n");
1824 			return -ENOMEM;
1825 		}
1826 	}
1827 	return 0;
1828 }
1829 
1830 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1831 {
1832 	int i;
1833 
1834 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1835 		kfree(adev->mode_info.afmt[i]);
1836 		adev->mode_info.afmt[i] = NULL;
1837 	}
1838 }
1839 
1840 static const u32 vga_control_regs[6] =
1841 {
1842 	mmD1VGA_CONTROL,
1843 	mmD2VGA_CONTROL,
1844 	mmD3VGA_CONTROL,
1845 	mmD4VGA_CONTROL,
1846 	mmD5VGA_CONTROL,
1847 	mmD6VGA_CONTROL,
1848 };
1849 
1850 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1851 {
1852 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1853 	struct drm_device *dev = crtc->dev;
1854 	struct amdgpu_device *adev = drm_to_adev(dev);
1855 	u32 vga_control;
1856 
1857 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1858 	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1859 }
1860 
1861 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1862 {
1863 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1864 	struct drm_device *dev = crtc->dev;
1865 	struct amdgpu_device *adev = drm_to_adev(dev);
1866 
1867 	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1868 }
1869 
1870 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1871 				     struct drm_framebuffer *fb,
1872 				     int x, int y, int atomic)
1873 {
1874 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1875 	struct drm_device *dev = crtc->dev;
1876 	struct amdgpu_device *adev = drm_to_adev(dev);
1877 	struct drm_framebuffer *target_fb;
1878 	struct drm_gem_object *obj;
1879 	struct amdgpu_bo *abo;
1880 	uint64_t fb_location, tiling_flags;
1881 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1882 	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1883 	u32 viewport_w, viewport_h;
1884 	int r;
1885 	bool bypass_lut = false;
1886 
1887 	/* no fb bound */
1888 	if (!atomic && !crtc->primary->fb) {
1889 		DRM_DEBUG_KMS("No FB bound\n");
1890 		return 0;
1891 	}
1892 
1893 	if (atomic)
1894 		target_fb = fb;
1895 	else
1896 		target_fb = crtc->primary->fb;
1897 
1898 	/* If atomic, assume fb object is pinned & idle & fenced and
1899 	 * just update base pointers
1900 	 */
1901 	obj = target_fb->obj[0];
1902 	abo = gem_to_amdgpu_bo(obj);
1903 	r = amdgpu_bo_reserve(abo, false);
1904 	if (unlikely(r != 0))
1905 		return r;
1906 
1907 	if (!atomic) {
1908 		abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1909 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1910 		if (unlikely(r != 0)) {
1911 			amdgpu_bo_unreserve(abo);
1912 			return -EINVAL;
1913 		}
1914 	}
1915 	fb_location = amdgpu_bo_gpu_offset(abo);
1916 
1917 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1918 	amdgpu_bo_unreserve(abo);
1919 
1920 	switch (target_fb->format->format) {
1921 	case DRM_FORMAT_C8:
1922 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1923 			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1924 		break;
1925 	case DRM_FORMAT_XRGB4444:
1926 	case DRM_FORMAT_ARGB4444:
1927 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1928 			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1929 #ifdef __BIG_ENDIAN
1930 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1931 #endif
1932 		break;
1933 	case DRM_FORMAT_XRGB1555:
1934 	case DRM_FORMAT_ARGB1555:
1935 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1936 			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1937 #ifdef __BIG_ENDIAN
1938 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1939 #endif
1940 		break;
1941 	case DRM_FORMAT_BGRX5551:
1942 	case DRM_FORMAT_BGRA5551:
1943 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1944 			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1945 #ifdef __BIG_ENDIAN
1946 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1947 #endif
1948 		break;
1949 	case DRM_FORMAT_RGB565:
1950 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1951 			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1952 #ifdef __BIG_ENDIAN
1953 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1954 #endif
1955 		break;
1956 	case DRM_FORMAT_XRGB8888:
1957 	case DRM_FORMAT_ARGB8888:
1958 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1959 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1960 #ifdef __BIG_ENDIAN
1961 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1962 #endif
1963 		break;
1964 	case DRM_FORMAT_XRGB2101010:
1965 	case DRM_FORMAT_ARGB2101010:
1966 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1967 			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1968 #ifdef __BIG_ENDIAN
1969 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1970 #endif
1971 		/* A fb with more than 8 bpc needs to bypass the hw LUT to retain precision */
1972 		bypass_lut = true;
1973 		break;
1974 	case DRM_FORMAT_BGRX1010102:
1975 	case DRM_FORMAT_BGRA1010102:
1976 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1977 			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1978 #ifdef __BIG_ENDIAN
1979 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1980 #endif
1981 		/* A fb with more than 8 bpc needs to bypass the hw LUT to retain precision */
1982 		bypass_lut = true;
1983 		break;
1984 	case DRM_FORMAT_XBGR8888:
1985 	case DRM_FORMAT_ABGR8888:
1986 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1987 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1988 		fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
1989 			   GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
1990 #ifdef __BIG_ENDIAN
1991 		fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1992 #endif
1993 		break;
1994 	default:
1995 		DRM_ERROR("Unsupported screen format %p4cc\n",
1996 			  &target_fb->format->format);
1997 		return -EINVAL;
1998 	}
1999 
2000 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2001 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2002 
2003 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2004 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2005 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2006 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2007 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2008 
2009 		fb_format |= GRPH_NUM_BANKS(num_banks);
2010 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
2011 		fb_format |= GRPH_TILE_SPLIT(tile_split);
2012 		fb_format |= GRPH_BANK_WIDTH(bankw);
2013 		fb_format |= GRPH_BANK_HEIGHT(bankh);
2014 		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
2015 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2016 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
2017 	}
2018 
2019 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2020 	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
2021 
2022 	dce_v6_0_vga_enable(crtc, false);
2023 
2024 	/* Make sure surface address is updated at vertical blank rather than
2025 	 * horizontal blank
2026 	 */
2027 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
2028 
2029 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2030 	       upper_32_bits(fb_location));
2031 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2032 	       upper_32_bits(fb_location));
2033 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2034 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2035 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2036 	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2037 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2038 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2039 
2040 	/*
2041 	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
2042 	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 MSBs, to
2043 	 * retain the full precision throughout the pipeline.
2044 	 */
2045 	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
2046 		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
2047 		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
2048 
2049 	if (bypass_lut)
2050 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2051 
2052 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2053 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2054 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2055 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2056 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2057 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2058 
2059 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
2060 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2061 
2062 	dce_v6_0_grph_enable(crtc, true);
2063 
2064 	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2065 		       target_fb->height);
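	/* align the viewport start to a 4-pixel/2-line boundary and keep the height even */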
2066 	x &= ~3;
2067 	y &= ~1;
2068 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2069 	       (x << 16) | y);
2070 	viewport_w = crtc->mode.hdisplay;
2071 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2072 
2073 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2074 	       (viewport_w << 16) | viewport_h);
2075 
2076 	/* set pageflip to happen anywhere in vblank interval */
2077 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2078 
2079 	if (!atomic && fb && fb != crtc->primary->fb) {
2080 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2081 		r = amdgpu_bo_reserve(abo, true);
2082 		if (unlikely(r != 0))
2083 			return r;
2084 		amdgpu_bo_unpin(abo);
2085 		amdgpu_bo_unreserve(abo);
2086 	}
2087 
2088 	/* Bytes per pixel may have changed */
2089 	dce_v6_0_bandwidth_update(adev);
2090 
2091 	return 0;
2093 }
2094 
2095 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2096 				    struct drm_display_mode *mode)
2097 {
2098 	struct drm_device *dev = crtc->dev;
2099 	struct amdgpu_device *adev = drm_to_adev(dev);
2100 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2101 
2102 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2103 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2104 		       INTERLEAVE_EN);
2105 	else
2106 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2107 }
2108 
2109 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2110 {
2112 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2113 	struct drm_device *dev = crtc->dev;
2114 	struct amdgpu_device *adev = drm_to_adev(dev);
2115 	u16 *r, *g, *b;
2116 	int i;
2117 
2118 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2119 
2120 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2121 	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2122 		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2123 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2124 	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2125 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2126 	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2127 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2128 	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2129 		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2130 
2131 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2132 
2133 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2134 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2135 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2136 
2137 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2138 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2139 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2140 
2141 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2142 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2143 
2144 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2145 	r = crtc->gamma_store;
2146 	g = r + crtc->gamma_size;
2147 	b = g + crtc->gamma_size;
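	/* each DC_LUT_30_COLOR entry packs the top 10 bits of R, G and B as 10:10:10 */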
2148 	for (i = 0; i < 256; i++) {
2149 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2150 		       ((*r++ & 0xffc0) << 14) |
2151 		       ((*g++ & 0xffc0) << 4) |
2152 		       (*b++ >> 6));
2153 	}
2154 
2155 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2156 	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2157 		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2158 		ICON_DEGAMMA_MODE(0) |
2159 		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2160 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2161 	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2162 		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2163 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2164 	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2165 		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2166 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2167 	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2168 		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2169 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2170 	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2173 }
2174 
2175 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2176 {
2177 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2178 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2179 
2180 	switch (amdgpu_encoder->encoder_id) {
2181 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2182 		return dig->linkb ? 1 : 0;
2183 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2184 		return dig->linkb ? 3 : 2;
2185 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2186 		return dig->linkb ? 5 : 4;
2187 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2188 		return 6;
2189 	default:
2190 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2191 		return 0;
2192 	}
2193 }
2194 
2195 /**
2196  * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2197  *
2198  * @crtc: drm crtc
2199  *
2200  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2201  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2202  * monitors a dedicated PPLL must be used.  If a particular board has
2203  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2204  * as there is no need to program the PLL itself.  If we are not able to
2205  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2206  * avoid messing up an existing monitor.
2209  */
2210 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2211 {
2212 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2213 	struct drm_device *dev = crtc->dev;
2214 	struct amdgpu_device *adev = drm_to_adev(dev);
2215 	u32 pll_in_use;
2216 	int pll;
2217 
2218 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2219 		if (adev->clock.dp_extclk)
2220 			/* skip PPLL programming if using ext clock */
2221 			return ATOM_PPLL_INVALID;
2222 		else
2223 			return ATOM_PPLL0;
2224 	} else {
2225 		/* use the same PPLL for all monitors with the same clock */
2226 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2227 		if (pll != ATOM_PPLL_INVALID)
2228 			return pll;
2229 	}
2230 
2231 	/* otherwise pick whichever of PPLL1 and PPLL2 is unused */
2232 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2233 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2234 		return ATOM_PPLL2;
2235 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2236 		return ATOM_PPLL1;
2237 	DRM_ERROR("unable to allocate a PPLL\n");
2238 	return ATOM_PPLL_INVALID;
2239 }
2240 
2241 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2242 {
2243 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2244 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2245 	uint32_t cur_lock;
2246 
2247 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2248 	if (lock)
2249 		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2250 	else
2251 		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2252 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2253 }
2254 
2255 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2256 {
2257 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2258 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2259 
2260 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2261 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2262 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2265 }
2266 
2267 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2268 {
2269 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2270 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2271 
2272 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2273 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2274 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2275 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2276 
2277 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2278 	       CUR_CONTROL__CURSOR_EN_MASK |
2279 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2280 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2282 }
2283 
2284 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2285 				       int x, int y)
2286 {
2287 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2288 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2289 	int xorigin = 0, yorigin = 0;
2290 
2291 	int w = amdgpu_crtc->cursor_width;
2292 
2293 	amdgpu_crtc->cursor_x = x;
2294 	amdgpu_crtc->cursor_y = y;
2295 
2296 	/* avivo cursors are offset into the total surface */
2297 	x += crtc->x;
2298 	y += crtc->y;
2299 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2300 
2301 	if (x < 0) {
2302 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2303 		x = 0;
2304 	}
2305 	if (y < 0) {
2306 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2307 		y = 0;
2308 	}
2309 
2310 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2311 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2312 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2313 	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2314 
2315 	return 0;
2316 }
2317 
2318 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2319 				     int x, int y)
2320 {
2321 	int ret;
2322 
2323 	dce_v6_0_lock_cursor(crtc, true);
2324 	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2325 	dce_v6_0_lock_cursor(crtc, false);
2326 
2327 	return ret;
2328 }
2329 
2330 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2331 				     struct drm_file *file_priv,
2332 				     uint32_t handle,
2333 				     uint32_t width,
2334 				     uint32_t height,
2335 				     int32_t hot_x,
2336 				     int32_t hot_y)
2337 {
2338 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2339 	struct drm_gem_object *obj;
2340 	struct amdgpu_bo *aobj;
2341 	int ret;
2342 
2343 	if (!handle) {
2344 		/* turn off cursor */
2345 		dce_v6_0_hide_cursor(crtc);
2346 		obj = NULL;
2347 		goto unpin;
2348 	}
2349 
2350 	if ((width > amdgpu_crtc->max_cursor_width) ||
2351 	    (height > amdgpu_crtc->max_cursor_height)) {
2352 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2353 		return -EINVAL;
2354 	}
2355 
2356 	obj = drm_gem_object_lookup(file_priv, handle);
2357 	if (!obj) {
2358 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2359 		return -ENOENT;
2360 	}
2361 
2362 	aobj = gem_to_amdgpu_bo(obj);
2363 	ret = amdgpu_bo_reserve(aobj, false);
2364 	if (ret != 0) {
2365 		drm_gem_object_put(obj);
2366 		return ret;
2367 	}
2368 
2369 	aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2370 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2371 	amdgpu_bo_unreserve(aobj);
2372 	if (ret) {
2373 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2374 		drm_gem_object_put(obj);
2375 		return ret;
2376 	}
2377 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2378 
2379 	dce_v6_0_lock_cursor(crtc, true);
2380 
2381 	if (width != amdgpu_crtc->cursor_width ||
2382 	    height != amdgpu_crtc->cursor_height ||
2383 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2384 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2385 		int x, y;
2386 
2387 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2388 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2389 
2390 		dce_v6_0_cursor_move_locked(crtc, x, y);
2391 
2392 		amdgpu_crtc->cursor_width = width;
2393 		amdgpu_crtc->cursor_height = height;
2394 		amdgpu_crtc->cursor_hot_x = hot_x;
2395 		amdgpu_crtc->cursor_hot_y = hot_y;
2396 	}
2397 
2398 	dce_v6_0_show_cursor(crtc);
2399 	dce_v6_0_lock_cursor(crtc, false);
2400 
2401 unpin:
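	/* unpin and release the previously set cursor BO, if any */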
2402 	if (amdgpu_crtc->cursor_bo) {
2403 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2404 		ret = amdgpu_bo_reserve(aobj, true);
2405 		if (likely(ret == 0)) {
2406 			amdgpu_bo_unpin(aobj);
2407 			amdgpu_bo_unreserve(aobj);
2408 		}
2409 		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2410 	}
2411 
2412 	amdgpu_crtc->cursor_bo = obj;
2413 	return 0;
2414 }
2415 
2416 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2417 {
2418 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2419 
2420 	if (amdgpu_crtc->cursor_bo) {
2421 		dce_v6_0_lock_cursor(crtc, true);
2422 
2423 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2424 					    amdgpu_crtc->cursor_y);
2425 
2426 		dce_v6_0_show_cursor(crtc);
2427 		dce_v6_0_lock_cursor(crtc, false);
2428 	}
2429 }
2430 
2431 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2432 				   u16 *blue, uint32_t size,
2433 				   struct drm_modeset_acquire_ctx *ctx)
2434 {
2435 	dce_v6_0_crtc_load_lut(crtc);
2436 
2437 	return 0;
2438 }
2439 
2440 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2441 {
2442 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2443 
2444 	drm_crtc_cleanup(crtc);
2445 	kfree(amdgpu_crtc);
2446 }
2447 
2448 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2449 	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2450 	.cursor_move = dce_v6_0_crtc_cursor_move,
2451 	.gamma_set = dce_v6_0_crtc_gamma_set,
2452 	.set_config = amdgpu_display_crtc_set_config,
2453 	.destroy = dce_v6_0_crtc_destroy,
2454 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2455 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2456 	.enable_vblank = amdgpu_enable_vblank_kms,
2457 	.disable_vblank = amdgpu_disable_vblank_kms,
2458 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2459 };
2460 
2461 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2462 {
2463 	struct drm_device *dev = crtc->dev;
2464 	struct amdgpu_device *adev = drm_to_adev(dev);
2465 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2466 	unsigned type;
2467 
2468 	switch (mode) {
2469 	case DRM_MODE_DPMS_ON:
2470 		amdgpu_crtc->enabled = true;
2471 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2472 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2473 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2474 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2475 						amdgpu_crtc->crtc_id);
2476 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2477 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2478 		drm_crtc_vblank_on(crtc);
2479 		dce_v6_0_crtc_load_lut(crtc);
2480 		break;
2481 	case DRM_MODE_DPMS_STANDBY:
2482 	case DRM_MODE_DPMS_SUSPEND:
2483 	case DRM_MODE_DPMS_OFF:
2484 		drm_crtc_vblank_off(crtc);
2485 		if (amdgpu_crtc->enabled)
2486 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2487 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2488 		amdgpu_crtc->enabled = false;
2489 		break;
2490 	}
2491 	/* adjust pm to dpms */
2492 	amdgpu_dpm_compute_clocks(adev);
2493 }
2494 
2495 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2496 {
2497 	/* disable crtc pair power gating before programming */
2498 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2499 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2500 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2501 }
2502 
2503 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2504 {
2505 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2506 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2507 }
2508 
2509 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2510 {
2512 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2513 	struct drm_device *dev = crtc->dev;
2514 	struct amdgpu_device *adev = drm_to_adev(dev);
2515 	struct amdgpu_atom_ss ss;
2516 	int i;
2517 
2518 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2519 	if (crtc->primary->fb) {
2520 		int r;
2521 		struct amdgpu_bo *abo;
2522 
2523 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2524 		r = amdgpu_bo_reserve(abo, true);
2525 		if (unlikely(r))
2526 			DRM_ERROR("failed to reserve abo before unpin\n");
2527 		else {
2528 			amdgpu_bo_unpin(abo);
2529 			amdgpu_bo_unreserve(abo);
2530 		}
2531 	}
2532 	/* disable the GRPH */
2533 	dce_v6_0_grph_enable(crtc, false);
2534 
2535 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2536 
2537 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2538 		if (adev->mode_info.crtcs[i] &&
2539 		    adev->mode_info.crtcs[i]->enabled &&
2540 		    i != amdgpu_crtc->crtc_id &&
2541 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2542 			/* another crtc is using this pll, don't turn
2543 			 * it off
2544 			 */
2545 			goto done;
2546 		}
2547 	}
2548 
2549 	switch (amdgpu_crtc->pll_id) {
2550 	case ATOM_PPLL1:
2551 	case ATOM_PPLL2:
2552 		/* disable the ppll */
2553 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2554 						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2555 		break;
2556 	default:
2557 		break;
2558 	}
2559 done:
2560 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2561 	amdgpu_crtc->adjusted_clock = 0;
2562 	amdgpu_crtc->encoder = NULL;
2563 	amdgpu_crtc->connector = NULL;
2564 }
2565 
2566 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2567 				  struct drm_display_mode *mode,
2568 				  struct drm_display_mode *adjusted_mode,
2569 				  int x, int y, struct drm_framebuffer *old_fb)
2570 {
2571 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2572 
2573 	if (!amdgpu_crtc->adjusted_clock)
2574 		return -EINVAL;
2575 
2576 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2577 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2578 	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2579 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2580 	amdgpu_atombios_crtc_scaler_setup(crtc);
2581 	dce_v6_0_cursor_reset(crtc);
2582 	/* update the stored hw mode for dpm */
2583 	amdgpu_crtc->hw_mode = *adjusted_mode;
2584 
2585 	return 0;
2586 }
2587 
2588 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2589 				     const struct drm_display_mode *mode,
2590 				     struct drm_display_mode *adjusted_mode)
2591 {
2593 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2594 	struct drm_device *dev = crtc->dev;
2595 	struct drm_encoder *encoder;
2596 
2597 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2598 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2599 		if (encoder->crtc == crtc) {
2600 			amdgpu_crtc->encoder = encoder;
2601 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2602 			break;
2603 		}
2604 	}
2605 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2606 		amdgpu_crtc->encoder = NULL;
2607 		amdgpu_crtc->connector = NULL;
2608 		return false;
2609 	}
2610 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2611 		return false;
2612 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2613 		return false;
2614 	/* pick pll */
2615 	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2616 	/* if we can't get a PPLL for a non-DP encoder, fail */
2617 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2618 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2619 		return false;
2620 
2621 	return true;
2622 }
2623 
2624 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2625 				  struct drm_framebuffer *old_fb)
2626 {
2627 	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2628 }
2629 
2630 static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2631 					 struct drm_framebuffer *fb,
2632 					 int x, int y, enum mode_set_atomic state)
2633 {
2634 	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2635 }
2636 
2637 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2638 	.dpms = dce_v6_0_crtc_dpms,
2639 	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2640 	.mode_set = dce_v6_0_crtc_mode_set,
2641 	.mode_set_base = dce_v6_0_crtc_set_base,
2642 	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2643 	.prepare = dce_v6_0_crtc_prepare,
2644 	.commit = dce_v6_0_crtc_commit,
2645 	.disable = dce_v6_0_crtc_disable,
2646 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2647 };
2648 
2649 static void dce_v6_0_panic_flush(struct drm_plane *plane)
2650 {
2651 	struct drm_framebuffer *fb;
2652 	struct amdgpu_crtc *amdgpu_crtc;
2653 	struct amdgpu_device *adev;
2654 	uint32_t fb_format;
2655 
2656 	if (!plane->fb)
2657 		return;
2658 
2659 	fb = plane->fb;
2660 	amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
2661 	adev = drm_to_adev(fb->dev);
2662 
2663 	/* Disable DC tiling so the linearly written panic image scans out correctly */
2664 	fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
2665 	fb_format &= ~GRPH_ARRAY_MODE(0x7);
2666 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2668 }
2669 
2670 static const struct drm_plane_helper_funcs dce_v6_0_drm_primary_plane_helper_funcs = {
2671 	.get_scanout_buffer = amdgpu_display_get_scanout_buffer,
2672 	.panic_flush = dce_v6_0_panic_flush,
2673 };
2674 
2675 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2676 {
2677 	struct amdgpu_crtc *amdgpu_crtc;
2678 
2679 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2680 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2681 	if (amdgpu_crtc == NULL)
2682 		return -ENOMEM;
2683 
2684 	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2685 
2686 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2687 	amdgpu_crtc->crtc_id = index;
2688 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2689 
2690 	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2691 	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2692 	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2693 	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2694 
2695 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2696 
2697 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2698 	amdgpu_crtc->adjusted_clock = 0;
2699 	amdgpu_crtc->encoder = NULL;
2700 	amdgpu_crtc->connector = NULL;
2701 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2702 	drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v6_0_drm_primary_plane_helper_funcs);
2703 
2704 	return 0;
2705 }
2706 
2707 static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
2708 {
2709 	struct amdgpu_device *adev = ip_block->adev;
2710 
2711 	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2712 	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2713 
2714 	dce_v6_0_set_display_funcs(adev);
2715 
2716 	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2717 
2718 	switch (adev->asic_type) {
2719 	case CHIP_TAHITI:
2720 	case CHIP_PITCAIRN:
2721 	case CHIP_VERDE:
2722 		adev->mode_info.num_hpd = 6;
2723 		adev->mode_info.num_dig = 6;
2724 		break;
2725 	case CHIP_OLAND:
2726 		adev->mode_info.num_hpd = 2;
2727 		adev->mode_info.num_dig = 2;
2728 		break;
2729 	default:
2730 		return -EINVAL;
2731 	}
2732 
2733 	dce_v6_0_set_irq_funcs(adev);
2734 
2735 	return 0;
2736 }
2737 
2738 static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
2739 {
2740 	int r, i;
2741 	bool ret;
2742 	struct amdgpu_device *adev = ip_block->adev;
2743 
2744 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2745 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2746 		if (r)
2747 			return r;
2748 	}
2749 
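	/* pageflip interrupts use src_ids 8, 10, ..., 18, one per crtc */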
2750 	for (i = 8; i < 20; i += 2) {
2751 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2752 		if (r)
2753 			return r;
2754 	}
2755 
2756 	/* HPD hotplug */
2757 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2758 	if (r)
2759 		return r;
2760 
2761 	adev->mode_info.mode_config_initialized = true;
2762 
2763 	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2764 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2765 	adev_to_drm(adev)->mode_config.max_width = 16384;
2766 	adev_to_drm(adev)->mode_config.max_height = 16384;
2767 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2768 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2769 	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2770 
2771 	r = amdgpu_display_modeset_create_props(adev);
2772 	if (r)
2773 		return r;
2774 
2775 	adev_to_drm(adev)->mode_config.max_width = 16384;
2776 	adev_to_drm(adev)->mode_config.max_height = 16384;
2777 
2778 	/* allocate crtcs */
2779 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2780 		r = dce_v6_0_crtc_init(adev, i);
2781 		if (r)
2782 			return r;
2783 	}
2784 
2785 	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2786 	if (ret)
2787 		amdgpu_display_print_display_setup(adev_to_drm(adev));
2788 	else
2789 		return -EINVAL;
2790 
2791 	/* setup afmt */
2792 	r = dce_v6_0_afmt_init(adev);
2793 	if (r)
2794 		return r;
2795 
2796 	r = dce_v6_0_audio_init(adev);
2797 	if (r)
2798 		return r;
2799 
2800 	/* Disable vblank IRQs aggressively for power-saving */
2801 	/* XXX: can this be enabled for DC? */
2802 	adev_to_drm(adev)->vblank_disable_immediate = true;
2803 
2804 	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2805 	if (r)
2806 		return r;
2807 
2808 	/* Pre-DCE11 */
2809 	INIT_DELAYED_WORK(&adev->hotplug_work,
2810 		  amdgpu_display_hotplug_work_func);
2811 
2812 	drm_kms_helper_poll_init(adev_to_drm(adev));
2813 
2814 	return r;
2815 }
2816 
2817 static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
2818 {
2819 	struct amdgpu_device *adev = ip_block->adev;
2820 
2821 	drm_edid_free(adev->mode_info.bios_hardcoded_edid);
2822 
2823 	drm_kms_helper_poll_fini(adev_to_drm(adev));
2824 
2825 	dce_v6_0_audio_fini(adev);
2826 	dce_v6_0_afmt_fini(adev);
2827 
2828 	drm_mode_config_cleanup(adev_to_drm(adev));
2829 	adev->mode_info.mode_config_initialized = false;
2830 
2831 	return 0;
2832 }
2833 
2834 static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
2835 {
2836 	int i;
2837 	struct amdgpu_device *adev = ip_block->adev;
2838 
2839 	/* disable vga render */
2840 	dce_v6_0_set_vga_render_state(adev, false);
2841 	/* init dig PHYs, disp eng pll */
2842 	amdgpu_atombios_encoder_init_dig(adev);
2843 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2844 
2845 	/* initialize hpd */
2846 	dce_v6_0_hpd_init(adev);
2847 
2848 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2849 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2850 	}
2851 
2852 	dce_v6_0_pageflip_interrupt_init(adev);
2853 
2854 	return 0;
2855 }
2856 
2857 static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
2858 {
2859 	int i;
2860 	struct amdgpu_device *adev = ip_block->adev;
2861 
2862 	dce_v6_0_hpd_fini(adev);
2863 
2864 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2865 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2866 	}
2867 
2868 	dce_v6_0_pageflip_interrupt_fini(adev);
2869 
2870 	flush_delayed_work(&adev->hotplug_work);
2871 
2872 	return 0;
2873 }
2874 
2875 static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block)
2876 {
2877 	struct amdgpu_device *adev = ip_block->adev;
2878 	int r;
2879 
2880 	r = amdgpu_display_suspend_helper(adev);
2881 	if (r)
2882 		return r;
2883 	adev->mode_info.bl_level =
2884 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2885 
2886 	return dce_v6_0_hw_fini(ip_block);
2887 }
2888 
2889 static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
2890 {
2891 	struct amdgpu_device *adev = ip_block->adev;
2892 	int ret;
2893 
2894 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2895 							   adev->mode_info.bl_level);
2896 
2897 	ret = dce_v6_0_hw_init(ip_block);
2898 
2899 	/* turn on the BL */
2900 	if (adev->mode_info.bl_encoder) {
2901 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2902 								  adev->mode_info.bl_encoder);
2903 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2904 						    bl_level);
2905 	}
2906 	if (ret)
2907 		return ret;
2908 
2909 	return amdgpu_display_resume_helper(adev);
2910 }
2911 
2912 static bool dce_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
2913 {
2914 	return true;
2915 }
2916 
2917 static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
2918 {
2919 	u32 srbm_soft_reset = 0, tmp;
2920 	struct amdgpu_device *adev = ip_block->adev;
2921 
2922 	if (dce_v6_0_is_display_hung(adev))
2923 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2924 
2925 	if (srbm_soft_reset) {
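		/* assert the DC soft reset, wait, then deassert and let things settle */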
2926 		tmp = RREG32(mmSRBM_SOFT_RESET);
2927 		tmp |= srbm_soft_reset;
2928 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2929 		WREG32(mmSRBM_SOFT_RESET, tmp);
2930 		tmp = RREG32(mmSRBM_SOFT_RESET);
2931 
2932 		udelay(50);
2933 
2934 		tmp &= ~srbm_soft_reset;
2935 		WREG32(mmSRBM_SOFT_RESET, tmp);
2936 		tmp = RREG32(mmSRBM_SOFT_RESET);
2937 
2938 		/* Wait a little for things to settle down */
2939 		udelay(50);
2940 	}
2941 	return 0;
2942 }
2943 
2944 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2945 						     int crtc,
2946 						     enum amdgpu_interrupt_state state)
2947 {
2948 	u32 reg_block, interrupt_mask;
2949 
2950 	if (crtc >= adev->mode_info.num_crtc) {
2951 		DRM_DEBUG("invalid crtc %d\n", crtc);
2952 		return;
2953 	}
2954 
2955 	switch (crtc) {
2956 	case 0:
2957 		reg_block = SI_CRTC0_REGISTER_OFFSET;
2958 		break;
2959 	case 1:
2960 		reg_block = SI_CRTC1_REGISTER_OFFSET;
2961 		break;
2962 	case 2:
2963 		reg_block = SI_CRTC2_REGISTER_OFFSET;
2964 		break;
2965 	case 3:
2966 		reg_block = SI_CRTC3_REGISTER_OFFSET;
2967 		break;
2968 	case 4:
2969 		reg_block = SI_CRTC4_REGISTER_OFFSET;
2970 		break;
2971 	case 5:
2972 		reg_block = SI_CRTC5_REGISTER_OFFSET;
2973 		break;
2974 	default:
2975 		DRM_DEBUG("invalid crtc %d\n", crtc);
2976 		return;
2977 	}
2978 
2979 	switch (state) {
2980 	case AMDGPU_IRQ_STATE_DISABLE:
2981 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2982 		interrupt_mask &= ~VBLANK_INT_MASK;
2983 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2984 		break;
2985 	case AMDGPU_IRQ_STATE_ENABLE:
2986 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2987 		interrupt_mask |= VBLANK_INT_MASK;
2988 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2989 		break;
2990 	default:
2991 		break;
2992 	}
2993 }
2994 
2995 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2996 						    int crtc,
2997 						    enum amdgpu_interrupt_state state)
2998 {
2999 
3000 }
3001 
3002 static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
3003 					    struct amdgpu_irq_src *src,
3004 					    unsigned type,
3005 					    enum amdgpu_interrupt_state state)
3006 {
3007 	u32 dc_hpd_int_cntl;
3008 
3009 	if (type >= adev->mode_info.num_hpd) {
3010 		DRM_DEBUG("invalid hpd %d\n", type);
3011 		return 0;
3012 	}
3013 
3014 	switch (state) {
3015 	case AMDGPU_IRQ_STATE_DISABLE:
3016 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3017 		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
3018 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3019 		break;
3020 	case AMDGPU_IRQ_STATE_ENABLE:
3021 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3022 		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
3023 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3024 		break;
3025 	default:
3026 		break;
3027 	}
3028 
3029 	return 0;
3030 }
3031 
3032 static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3033 					     struct amdgpu_irq_src *src,
3034 					     unsigned type,
3035 					     enum amdgpu_interrupt_state state)
3036 {
3037 	switch (type) {
3038 	case AMDGPU_CRTC_IRQ_VBLANK1:
3039 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3040 		break;
3041 	case AMDGPU_CRTC_IRQ_VBLANK2:
3042 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3043 		break;
3044 	case AMDGPU_CRTC_IRQ_VBLANK3:
3045 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3046 		break;
3047 	case AMDGPU_CRTC_IRQ_VBLANK4:
3048 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3049 		break;
3050 	case AMDGPU_CRTC_IRQ_VBLANK5:
3051 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3052 		break;
3053 	case AMDGPU_CRTC_IRQ_VBLANK6:
3054 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3055 		break;
3056 	case AMDGPU_CRTC_IRQ_VLINE1:
3057 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
3058 		break;
3059 	case AMDGPU_CRTC_IRQ_VLINE2:
3060 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
3061 		break;
3062 	case AMDGPU_CRTC_IRQ_VLINE3:
3063 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
3064 		break;
3065 	case AMDGPU_CRTC_IRQ_VLINE4:
3066 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
3067 		break;
3068 	case AMDGPU_CRTC_IRQ_VLINE5:
3069 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
3070 		break;
3071 	case AMDGPU_CRTC_IRQ_VLINE6:
3072 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
3073 		break;
3074 	default:
3075 		break;
3076 	}
3077 	return 0;
3078 }
3079 
3080 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
3081 			     struct amdgpu_irq_src *source,
3082 			     struct amdgpu_iv_entry *entry)
3083 {
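	/* legacy IH src_ids 1..6 correspond to CRTC0..5 (see dce_v6_0_sw_init) */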
3084 	unsigned crtc = entry->src_id - 1;
3085 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3086 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3087 								    crtc);
3088 
3089 	switch (entry->src_data[0]) {
3090 	case 0: /* vblank */
3091 		if (disp_int & interrupt_status_offsets[crtc].vblank)
3092 			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
3093 		else
3094 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3095 
3096 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3097 			drm_handle_vblank(adev_to_drm(adev), crtc);
3098 		}
3099 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3100 		break;
3101 	case 1: /* vline */
3102 		if (disp_int & interrupt_status_offsets[crtc].vline)
3103 			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
3104 		else
3105 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3106 
3107 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3108 		break;
3109 	default:
3110 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3111 		break;
3112 	}
3113 
3114 	return 0;
3115 }
3116 
3117 static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3118 						 struct amdgpu_irq_src *src,
3119 						 unsigned type,
3120 						 enum amdgpu_interrupt_state state)
3121 {
3122 	u32 reg;
3123 
3124 	if (type >= adev->mode_info.num_crtc) {
3125 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3126 		return -EINVAL;
3127 	}
3128 
3129 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3130 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3131 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3132 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3133 	else
3134 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3135 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3136 
3137 	return 0;
3138 }
3139 
3140 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
3141 				 struct amdgpu_irq_src *source,
3142 				 struct amdgpu_iv_entry *entry)
3143 {
3144 	unsigned long flags;
3145 	unsigned crtc_id;
3146 	struct amdgpu_crtc *amdgpu_crtc;
3147 	struct amdgpu_flip_work *works;
3148 
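	/* pageflip src_ids 8, 10, ..., 18 map to CRTC0..5 (see dce_v6_0_sw_init) */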
3149 	crtc_id = (entry->src_id - 8) >> 1;
3150 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3151 
3152 	if (crtc_id >= adev->mode_info.num_crtc) {
3153 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3154 		return -EINVAL;
3155 	}
3156 
3157 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3158 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3159 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3160 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3161 
3162 	/* the IRQ can fire during init, before the crtc is fully set up */
3163 	if (amdgpu_crtc == NULL)
3164 		return 0;
3165 
3166 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3167 	works = amdgpu_crtc->pflip_works;
3168 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3169 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3170 						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3171 						amdgpu_crtc->pflip_status,
3172 						AMDGPU_FLIP_SUBMITTED);
3173 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3174 		return 0;
3175 	}
3176 
3177 	/* page flip completed. clean up */
3178 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3179 	amdgpu_crtc->pflip_works = NULL;
3180 
	/* wake up userspace */
3182 	if (works->event)
3183 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3184 
3185 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3186 
3187 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3188 	schedule_work(&works->unpin_work);
3189 
3190 	return 0;
3191 }
3192 
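/*
 * dce_v6_0_hpd_irq - handle a hot-plug detect interrupt by acking the
 * HPD status bit and kicking off the deferred hotplug work.
 */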
3193 static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3194 			    struct amdgpu_irq_src *source,
3195 			    struct amdgpu_iv_entry *entry)
3196 {
3197 	uint32_t disp_int, mask;
3198 	unsigned hpd;
3199 
3200 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3201 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3202 		return 0;
3203 	}
3204 
3205 	hpd = entry->src_data[0];
3206 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3207 	mask = interrupt_status_offsets[hpd].hpd;
3208 
3209 	if (disp_int & mask) {
3210 		dce_v6_0_hpd_int_ack(adev, hpd);
3211 		schedule_delayed_work(&adev->hotplug_work, 0);
3212 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3213 	}
3214 
3215 	return 0;
3216 }
3217 
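/* Clock- and power-gating are not implemented for DCE6; both callbacks are no-ops. */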
3218 static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3219 					  enum amd_clockgating_state state)
3220 {
3221 	return 0;
3222 }
3223 
3224 static int dce_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
3225 					  enum amd_powergating_state state)
3226 {
3227 	return 0;
3228 }
3229 
3230 static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3231 	.name = "dce_v6_0",
3232 	.early_init = dce_v6_0_early_init,
3233 	.sw_init = dce_v6_0_sw_init,
3234 	.sw_fini = dce_v6_0_sw_fini,
3235 	.hw_init = dce_v6_0_hw_init,
3236 	.hw_fini = dce_v6_0_hw_fini,
3237 	.suspend = dce_v6_0_suspend,
3238 	.resume = dce_v6_0_resume,
3239 	.is_idle = dce_v6_0_is_idle,
3240 	.soft_reset = dce_v6_0_soft_reset,
3241 	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3242 	.set_powergating_state = dce_v6_0_set_powergating_state,
3243 };
3244 
3245 static void
3246 dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3247 			  struct drm_display_mode *mode,
3248 			  struct drm_display_mode *adjusted_mode)
3249 {
3251 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3252 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3253 
3254 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3255 
3256 	/* need to call this here rather than in prepare() since we need some crtc info */
3257 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3258 
	/* the scaler setup clears this on some chips, so program the interleave here */
3260 	dce_v6_0_set_interleave(encoder->crtc, mode);
3261 
3262 	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3263 		dce_v6_0_afmt_enable(encoder, true);
3264 		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3265 	}
3266 }
3267 
3268 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3269 {
3271 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3272 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3273 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3274 
3275 	if ((amdgpu_encoder->active_device &
3276 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3277 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3278 	     ENCODER_OBJECT_ID_NONE)) {
3279 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3280 		if (dig) {
3281 			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3282 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3283 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3284 		}
3285 	}
3286 
3287 	amdgpu_atombios_scratch_regs_lock(adev, true);
3288 
3289 	if (connector) {
3290 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3291 
3292 		/* select the clock/data port if it uses a router */
3293 		if (amdgpu_connector->router.cd_valid)
3294 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3295 
3296 		/* turn eDP panel on for mode set */
3297 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3298 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3299 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3300 	}
3301 
3302 	/* this is needed for the pll/ss setup to work correctly in some cases */
3303 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3304 	/* set up the FMT blocks */
3305 	dce_v6_0_program_fmt(encoder);
3306 }
3307 
3308 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3309 {
3311 	struct drm_device *dev = encoder->dev;
3312 	struct amdgpu_device *adev = drm_to_adev(dev);
3313 
3314 	/* need to call this here as we need the crtc set up */
3315 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3316 	amdgpu_atombios_scratch_regs_lock(adev, false);
3317 }
3318 
3319 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3320 {
3322 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3323 	struct amdgpu_encoder_atom_dig *dig;
3324 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3325 
3326 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3327 
3328 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3329 		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3330 			dce_v6_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		if (dig)
			dig->dig_encoder = -1;
3333 	}
3334 	amdgpu_encoder->active_device = 0;
3335 }
3336 
3337 /* these are handled by the primary encoders */
3338 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3339 {
3341 }
3342 
3343 static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3344 {
3346 }
3347 
3348 static void dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3349 		      struct drm_display_mode *mode,
3350 		      struct drm_display_mode *adjusted_mode)
3351 {
3353 }
3354 
3355 static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3356 {
3358 }
3359 
3360 static void dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3361 {
3363 }
3364 
3365 static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3366 				    const struct drm_display_mode *mode,
3367 				    struct drm_display_mode *adjusted_mode)
3368 {
3369 	return true;
3370 }
3371 
3372 static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3373 	.dpms = dce_v6_0_ext_dpms,
3374 	.mode_fixup = dce_v6_0_ext_mode_fixup,
3375 	.prepare = dce_v6_0_ext_prepare,
3376 	.mode_set = dce_v6_0_ext_mode_set,
3377 	.commit = dce_v6_0_ext_commit,
3378 	.disable = dce_v6_0_ext_disable,
3379 	/* no detect for TMDS/LVDS yet */
3380 };
3381 
3382 static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3383 	.dpms = amdgpu_atombios_encoder_dpms,
3384 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3385 	.prepare = dce_v6_0_encoder_prepare,
3386 	.mode_set = dce_v6_0_encoder_mode_set,
3387 	.commit = dce_v6_0_encoder_commit,
3388 	.disable = dce_v6_0_encoder_disable,
3389 	.detect = amdgpu_atombios_encoder_dig_detect,
3390 };
3391 
3392 static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3393 	.dpms = amdgpu_atombios_encoder_dpms,
3394 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3395 	.prepare = dce_v6_0_encoder_prepare,
3396 	.mode_set = dce_v6_0_encoder_mode_set,
3397 	.commit = dce_v6_0_encoder_commit,
3398 	.detect = amdgpu_atombios_encoder_dac_detect,
3399 };
3400 
3401 static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3402 {
3403 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3404 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3405 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3406 	kfree(amdgpu_encoder->enc_priv);
3407 	drm_encoder_cleanup(encoder);
3408 	kfree(amdgpu_encoder);
3409 }
3410 
3411 static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3412 	.destroy = dce_v6_0_encoder_destroy,
3413 };
3414 
3415 static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3416 				 uint32_t encoder_enum,
3417 				 uint32_t supported_device,
3418 				 u16 caps)
3419 {
3420 	struct drm_device *dev = adev_to_drm(adev);
3421 	struct drm_encoder *encoder;
3422 	struct amdgpu_encoder *amdgpu_encoder;
3423 
3424 	/* see if we already added it */
3425 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3426 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3427 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3428 			amdgpu_encoder->devices |= supported_device;
3429 			return;
3430 		}
3431 	}
3432 
3433 	/* add a new one */
3434 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3435 	if (!amdgpu_encoder)
3436 		return;
3437 
3438 	encoder = &amdgpu_encoder->base;
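	/*
	 * possible_crtcs is a bitmask with one bit per usable CRTC,
	 * i.e. (1 << num_crtc) - 1 for the configurations handled below.
	 */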
3439 	switch (adev->mode_info.num_crtc) {
3440 	case 1:
3441 		encoder->possible_crtcs = 0x1;
3442 		break;
3443 	case 2:
3444 	default:
3445 		encoder->possible_crtcs = 0x3;
3446 		break;
3447 	case 4:
3448 		encoder->possible_crtcs = 0xf;
3449 		break;
3450 	case 6:
3451 		encoder->possible_crtcs = 0x3f;
3452 		break;
3453 	}
3454 
3455 	amdgpu_encoder->enc_priv = NULL;
3456 	amdgpu_encoder->encoder_enum = encoder_enum;
3457 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3458 	amdgpu_encoder->devices = supported_device;
3459 	amdgpu_encoder->rmx_type = RMX_OFF;
3460 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3461 	amdgpu_encoder->is_ext_encoder = false;
3462 	amdgpu_encoder->caps = caps;
3463 
3464 	switch (amdgpu_encoder->encoder_id) {
3465 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3466 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3467 		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3468 				 DRM_MODE_ENCODER_DAC, NULL);
3469 		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3470 		break;
3471 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3472 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3473 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3474 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3475 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3476 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3477 			amdgpu_encoder->rmx_type = RMX_FULL;
3478 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3479 					 DRM_MODE_ENCODER_LVDS, NULL);
3480 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3481 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3482 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3483 					 DRM_MODE_ENCODER_DAC, NULL);
3484 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3485 		} else {
3486 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3487 					 DRM_MODE_ENCODER_TMDS, NULL);
3488 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3489 		}
3490 		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3491 		break;
3492 	case ENCODER_OBJECT_ID_SI170B:
3493 	case ENCODER_OBJECT_ID_CH7303:
3494 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3495 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3496 	case ENCODER_OBJECT_ID_TITFP513:
3497 	case ENCODER_OBJECT_ID_VT1623:
3498 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3499 	case ENCODER_OBJECT_ID_TRAVIS:
3500 	case ENCODER_OBJECT_ID_NUTMEG:
3501 		/* these are handled by the primary encoders */
3502 		amdgpu_encoder->is_ext_encoder = true;
3503 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3504 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3505 					 DRM_MODE_ENCODER_LVDS, NULL);
3506 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3507 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3508 					 DRM_MODE_ENCODER_DAC, NULL);
3509 		else
3510 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3511 					 DRM_MODE_ENCODER_TMDS, NULL);
3512 		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3513 		break;
3514 	}
3515 }
3516 
3517 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3518 	.bandwidth_update = &dce_v6_0_bandwidth_update,
3519 	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3520 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3521 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3522 	.hpd_sense = &dce_v6_0_hpd_sense,
3523 	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3524 	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3525 	.page_flip = &dce_v6_0_page_flip,
3526 	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3527 	.add_encoder = &dce_v6_0_encoder_add,
3528 	.add_connector = &amdgpu_connector_add,
3529 };
3530 
3531 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3532 {
3533 	adev->mode_info.funcs = &dce_v6_0_display_funcs;
3534 }
3535 
3536 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3537 	.set = dce_v6_0_set_crtc_interrupt_state,
3538 	.process = dce_v6_0_crtc_irq,
3539 };
3540 
3541 static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3542 	.set = dce_v6_0_set_pageflip_interrupt_state,
3543 	.process = dce_v6_0_pageflip_irq,
3544 };
3545 
3546 static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3547 	.set = dce_v6_0_set_hpd_interrupt_state,
3548 	.process = dce_v6_0_hpd_irq,
3549 };
3550 
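/*
 * dce_v6_0_set_irq_funcs - register the CRTC, page-flip and HPD interrupt
 * source callbacks.  AMDGPU_CRTC_IRQ_VLINE1 follows the VBLANK1..VBLANK6
 * entries in the CRTC irq type enum, so VLINE1 + num_crtc covers every
 * vblank type plus one vline type per populated CRTC.
 */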
3551 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3552 {
3553 	if (adev->mode_info.num_crtc > 0)
3554 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3555 	else
3556 		adev->crtc_irq.num_types = 0;
3557 	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3558 
3559 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3560 	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3561 
3562 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3563 	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3564 }
3565 
3566 const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3567 {
3568 	.type = AMD_IP_BLOCK_TYPE_DCE,
3569 	.major = 6,
3570 	.minor = 0,
3571 	.rev = 0,
3572 	.funcs = &dce_v6_0_ip_funcs,
3573 };
3574 
3575 const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3576 {
3577 	.type = AMD_IP_BLOCK_TYPE_DCE,
3578 	.major = 6,
3579 	.minor = 4,
3580 	.rev = 0,
3581 	.funcs = &dce_v6_0_ip_funcs,
3582 };
3583
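/*
 * Usage sketch (not part of this file): the SI SoC init code (e.g. si.c)
 * is expected to register one of these IP block versions during device
 * init, roughly:
 *
 *	amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
 *
 * with dce_v6_4_ip_block used instead on DCE 6.4 parts.
 */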