xref: /linux/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"

#include "dce_v6_0.h"
#include "sid.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"

#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"

#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t	reg;
	uint32_t	vblank;
	uint32_t	vline;
	uint32_t	hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
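
/*
 * The Azalia (HD audio) endpoint registers above are not mapped directly;
 * they are reached through an index/data pair: write the endpoint register
 * number into AZALIA_F0_CODEC_ENDPOINT_INDEX, then read or write the value
 * through AZALIA_F0_CODEC_ENDPOINT_DATA.  The audio_endpt_idx_lock guards
 * the shared index register so a concurrent accessor cannot change the
 * index between the two steps.
 */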

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to flip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * Programs the new surface address and pitch; the write to the low
 * surface address register latches the double buffered update, which
 * then takes effect at the next vsync (or at hsync for async flips).
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update pitch */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
				 int hpd)
{
	u32 tmp;

	if (hpd >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", hpd);
		return;
	}

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* Don't enable the hpd interrupt on eDP or LVDS: it can
			 * break the aux dp channel on iMacs and cause interrupt
			 * storms during dpms.  Leaving it disabled helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
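
/*
 * The hang detection above is a simple heuristic: snapshot the H/V counter
 * of every enabled CRTC, then poll up to 10 times with a 100 us delay.  A
 * CRTC whose counter advances is clearly still scanning out and is dropped
 * from the suspect mask; if any counter stays frozen for the full ~1 ms
 * window, the display block is reported as hung.
 */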

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(mmVGA_RENDER_CONTROL,
		       RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK);
}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		return 6;
	case CHIP_OLAND:
		return 2;
	default:
		return 0;
	}
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA rendering and any enabled CRTCs if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/* disable the CRTCs */
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
		else
			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
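
/*
 * Worked example for the average bandwidth (illustrative 1080p-like
 * numbers): src_width = 1920, bytes_per_pixel = 4, vsc = 1, and a line
 * time of ~14.8 us (active + blank, in ns, divided by 1000 above):
 *   bandwidth = 1920 * 4 bytes / 14.8 us ~= 519 MBytes/s.
 */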

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
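
/*
 * Reading the watermark above as a sum of latency terms: the fixed memory
 * controller latency, the time other active heads may spend ahead of this
 * one in the return queue (chunk and cursor-line fetches), and the display
 * pipe latency derived from the display clock.  If the line buffer cannot
 * be refilled within the active portion of a scanline, the shortfall
 * (line_fill_time - active_time) is added on top.
 */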

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the latency can be hidden, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
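
/*
 * In other words: a head can tolerate memory latency as long as it still
 * has buffered lines left to scan out.  E.g. with a 14.8 us line time, one
 * latency tolerant line plus a 1.9 us blank gives ~16.7 us of hiding, and
 * the latency watermark must stay below that (illustrative numbers only).
 */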

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min_t(u32, line_time, 65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min_t(u32, dce_v6_0_latency_watermark(&wm_high), 65535);
		/* set for low clocks */
		latency_watermark_b = min_t(u32, dce_v6_0_latency_watermark(&wm_low), 65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;

	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
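
/*
 * Units for the priority mark math above (as implied by the code, not a
 * register spec): latency_watermark is in ns and mode->clock in kHz, so
 * wm * clock / 1000000 converts the latency into a pixel count, which is
 * then scaled by the horizontal scale ratio and expressed in units of
 * 16 pixels.  E.g. a 4000 ns watermark at 148500 kHz with hsc = 1 gives
 * 4000 * 148.5 / 1000 / 16 ~= 37 (illustrative numbers).
 */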

/* watermark setup */
/**
 * dce_v6_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 * @other_mode: the display mode of another display controller
 *              that may be sharing the line buffer
 *
 * Set up the line buffer allocation for
 * the selected display controller (SI).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				   struct amdgpu_crtc *amdgpu_crtc,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 *  0 - half lb
	 *  2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
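
/*
 * The return values above encode the allocation chosen: half of a shared
 * line buffer is reported as 4096 * 2 pixels, and the whole buffer (only
 * legal when the paired controller is disabled) as 8192 * 2 pixels.  The
 * caller feeds this straight into the watermark math as lb_size.
 */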

/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}
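
/*
 * Note the i += 2 stride above: CRTCs are handled in the pairs that share
 * a line buffer (0/1, 2/3, 4/5), with each partner passed as the other's
 * other_mode so dce_v6_0_line_buffer_adjust() can decide between a half
 * and a whole line buffer allocation.
 */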

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
					PORT_CONNECTIVITY))
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
		             dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	int interlace = 0;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;

	if (connector->latency_present[interlace]) {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u8 *sadb = NULL;
	int sad_count;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			HDMI_CONNECTION, 0);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			DP_CONNECTION, 0);

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				DP_CONNECTION, 1);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				HDMI_CONNECTION, 1);

	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				SPEAKER_ALLOCATION, 5); /* stereo */

	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
	if (sad_count < 0)
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
					       (sad->byte2 <<
						AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
					       (sad->freq <<
						AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	default:
		adev->mode_info.audio.num_pins = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.audio.num_pins = 2;
		break;
	}

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	adev->mode_info.audio.enabled = false;
}

static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
				   uint32_t clock, int bpc)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
			bpc > 8 ? 0 : 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}
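
/*
 * The N/CTS pairs programmed above implement the standard HDMI audio clock
 * regeneration relation, 128 * fs = f_pixel * N / CTS.  For example, for
 * 48 kHz audio on a 148.5 MHz pixel clock the spec's recommended values
 * are N = 6144 and CTS = 148500: 148500000 * 6144 / 148500 / 128 = 48000.
 */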
1514 
1515 static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
1516 					       struct drm_display_mode *mode)
1517 {
1518 	struct drm_device *dev = encoder->dev;
1519 	struct amdgpu_device *adev = drm_to_adev(dev);
1520 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1521 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1522 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1523 	struct hdmi_avi_infoframe frame;
1524 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1525 	uint8_t *payload = buffer + 3;
1526 	uint8_t *header = buffer;
1527 	ssize_t err;
1528 	u32 tmp;
1529 
1530 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1531 	if (err < 0) {
1532 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1533 		return;
1534 	}
1535 
1536 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1537 	if (err < 0) {
1538 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1539 		return;
1540 	}
1541 
1542 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1543 	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
1544 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1545 	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
1546 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1547 	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
1548 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1549 	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
1550 
1551 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1552 	/* anything other than 0 */
1553 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
1554 			HDMI_AUDIO_INFO_LINE, 2);
1555 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1556 }
1557 
1558 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1559 {
1560 	struct drm_device *dev = encoder->dev;
1561 	struct amdgpu_device *adev = drm_to_adev(dev);
1562 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1563 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1564 	u32 tmp;
1565 
1566 	/*
1567 	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
1568 	 * Express [24MHz / target pixel clock] as an exact rational
1569 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1570 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1571 	 */
1572 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1573 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1574 			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
1575 	if (em == ATOM_ENCODER_MODE_HDMI) {
1576 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1577 				DCCG_AUDIO_DTO_SEL, 0);
1578 	} else if (ENCODER_MODE_IS_DP(em)) {
1579 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1580 				DCCG_AUDIO_DTO_SEL, 1);
1581 	}
1582 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
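	/* both DTO values are in kHz: phase 24000 = the 24 MHz reference,
	 * module = the target clock
	 */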
1583 	if (em == ATOM_ENCODER_MODE_HDMI) {
1584 		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
1585 		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
1586 	} else if (ENCODER_MODE_IS_DP(em)) {
1587 		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
1588 		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
1589 	}
1590 }
1591 
1592 static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1593 {
1594 	struct drm_device *dev = encoder->dev;
1595 	struct amdgpu_device *adev = drm_to_adev(dev);
1596 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1597 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1598 	u32 tmp;
1599 
1600 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1601 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1602 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1603 
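	/* program the IEC 60958 channel status channel numbers (1-based) */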
1604 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1605 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1606 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1607 
1608 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1609 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1610 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1611 
1612 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1613 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1614 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1615 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1616 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1617 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1618 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1619 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1620 
1621 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1622 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1623 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1624 
1625 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1626 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1627 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1628 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1629 
1630 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1631 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1632 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1633 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1634 }
1635 
1636 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1637 {
1638 	struct drm_device *dev = encoder->dev;
1639 	struct amdgpu_device *adev = drm_to_adev(dev);
1640 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1641 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1642 	u32 tmp;
1643 
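	/* toggle the AVMUTE flag carried in the HDMI General Control packet */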
1644 	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1645 	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1646 	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1647 }
1648 
1649 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1650 {
1651 	struct drm_device *dev = encoder->dev;
1652 	struct amdgpu_device *adev = drm_to_adev(dev);
1653 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1654 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1655 	u32 tmp;
1656 
1657 	if (enable) {
1658 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1659 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1660 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1661 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1662 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1663 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1664 
1665 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1666 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1667 		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1668 
1669 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1670 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1671 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1672 	} else {
1673 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1674 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1675 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1676 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1677 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1678 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1679 
1680 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1681 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1682 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1683 	}
1684 }
1685 
1686 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1687 {
1688 	struct drm_device *dev = encoder->dev;
1689 	struct amdgpu_device *adev = drm_to_adev(dev);
1690 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1691 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1692 	u32 tmp;
1693 
1694 	if (enable) {
1695 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1696 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1697 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1698 
1699 		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1700 		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1701 		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1702 
1703 		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1704 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1705 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1706 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1707 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1708 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1709 	} else {
1710 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1711 	}
1712 }
1713 
1714 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1715 				  struct drm_display_mode *mode)
1716 {
1717 	struct drm_device *dev = encoder->dev;
1718 	struct amdgpu_device *adev = drm_to_adev(dev);
1719 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1720 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1721 	struct drm_connector *connector;
1722 	struct drm_connector_list_iter iter;
1723 	struct amdgpu_connector *amdgpu_connector = NULL;
1724 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1725 	int bpc = 8;
1726 
1727 	if (!dig || !dig->afmt)
1728 		return;
1729 
1730 	drm_connector_list_iter_begin(dev, &iter);
1731 	drm_for_each_connector_iter(connector, &iter) {
1732 		if (connector->encoder == encoder) {
1733 			amdgpu_connector = to_amdgpu_connector(connector);
1734 			break;
1735 		}
1736 	}
1737 	drm_connector_list_iter_end(&iter);
1738 
1739 	if (!amdgpu_connector) {
1740 		DRM_ERROR("Couldn't find encoder's connector\n");
1741 		return;
1742 	}
1743 
1744 	if (!dig->afmt->enabled)
1745 		return;
1746 
1747 	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1748 	if (!dig->afmt->pin)
1749 		return;
1750 
1751 	if (encoder->crtc) {
1752 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1753 		bpc = amdgpu_crtc->bpc;
1754 	}
1755 
1756 	/* disable audio before setting up hw */
1757 	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1758 
1759 	dce_v6_0_audio_set_mute(encoder, true);
1760 	dce_v6_0_audio_write_speaker_allocation(encoder);
1761 	dce_v6_0_audio_write_sad_regs(encoder);
1762 	dce_v6_0_audio_write_latency_fields(encoder, mode);
1763 	if (em == ATOM_ENCODER_MODE_HDMI) {
1764 		dce_v6_0_audio_set_dto(encoder, mode->clock);
1765 		dce_v6_0_audio_set_vbi_packet(encoder);
1766 		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1767 	} else if (ENCODER_MODE_IS_DP(em)) {
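		/* default_dispclk is in 10 kHz units; scale it to kHz for the DTO */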
1768 		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1769 	}
1770 	dce_v6_0_audio_set_packet(encoder);
1771 	dce_v6_0_audio_select_pin(encoder);
1772 	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1773 	dce_v6_0_audio_set_mute(encoder, false);
1774 	if (em == ATOM_ENCODER_MODE_HDMI) {
1775 		dce_v6_0_audio_hdmi_enable(encoder, true);
1776 	} else if (ENCODER_MODE_IS_DP(em)) {
1777 		dce_v6_0_audio_dp_enable(encoder, true);
1778 	}
1779 
1780 	/* enable audio after setting up hw */
1781 	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1782 }
1783 
1784 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1785 {
1786 	struct drm_device *dev = encoder->dev;
1787 	struct amdgpu_device *adev = drm_to_adev(dev);
1788 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1789 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1790 
1791 	if (!dig || !dig->afmt)
1792 		return;
1793 
1794 	/* return silently if we are already in the requested state */
1795 	if (enable && dig->afmt->enabled)
1796 		return;
1797 
1798 	if (!enable && !dig->afmt->enabled)
1799 		return;
1800 
1801 	if (!enable && dig->afmt->pin) {
1802 		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1803 		dig->afmt->pin = NULL;
1804 	}
1805 
1806 	dig->afmt->enabled = enable;
1807 
1808 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1809 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1810 }
1811 
1812 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1813 {
1814 	int i, j;
1815 
1816 	for (i = 0; i < adev->mode_info.num_dig; i++)
1817 		adev->mode_info.afmt[i] = NULL;
1818 
1819 	/* DCE6 has audio blocks tied to DIG encoders */
1820 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1821 		adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt,
1822 						      GFP_KERNEL);
1823 		if (adev->mode_info.afmt[i]) {
1824 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1825 			adev->mode_info.afmt[i]->id = i;
1826 		} else {
1827 			for (j = 0; j < i; j++) {
1828 				kfree(adev->mode_info.afmt[j]);
1829 				adev->mode_info.afmt[j] = NULL;
1830 			}
1831 			DRM_ERROR("Out of memory allocating afmt table\n");
1832 			return -ENOMEM;
1833 		}
1834 	}
1835 	return 0;
1836 }
1837 
1838 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1839 {
1840 	int i;
1841 
1842 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1843 		kfree(adev->mode_info.afmt[i]);
1844 		adev->mode_info.afmt[i] = NULL;
1845 	}
1846 }
1847 
1848 static const u32 vga_control_regs[6] =
1849 {
1850 	mmD1VGA_CONTROL,
1851 	mmD2VGA_CONTROL,
1852 	mmD3VGA_CONTROL,
1853 	mmD4VGA_CONTROL,
1854 	mmD5VGA_CONTROL,
1855 	mmD6VGA_CONTROL,
1856 };
1857 
1858 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1859 {
1860 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1861 	struct drm_device *dev = crtc->dev;
1862 	struct amdgpu_device *adev = drm_to_adev(dev);
1863 	u32 vga_control;
1864 
1865 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1866 	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1867 }
1868 
1869 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1870 {
1871 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1872 	struct drm_device *dev = crtc->dev;
1873 	struct amdgpu_device *adev = drm_to_adev(dev);
1874 
1875 	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1876 }
1877 
1878 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1879 				     struct drm_framebuffer *fb,
1880 				     int x, int y)
1881 {
1882 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1883 	struct drm_device *dev = crtc->dev;
1884 	struct amdgpu_device *adev = drm_to_adev(dev);
1885 	struct drm_framebuffer *target_fb;
1886 	struct drm_gem_object *obj;
1887 	struct amdgpu_bo *abo;
1888 	uint64_t fb_location, tiling_flags;
1889 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1890 	u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1891 	u32 viewport_w, viewport_h;
1892 	int r;
1893 	bool bypass_lut = false;
1894 
1895 	/* no fb bound */
1896 	if (!crtc->primary->fb) {
1897 		DRM_DEBUG_KMS("No FB bound\n");
1898 		return 0;
1899 	}
1900 
1901 	target_fb = crtc->primary->fb;
1902 
1903 	/* pin the fb object into VRAM and grab its GPU address */
1906 	obj = target_fb->obj[0];
1907 	abo = gem_to_amdgpu_bo(obj);
1908 	r = amdgpu_bo_reserve(abo, false);
1909 	if (unlikely(r != 0))
1910 		return r;
1911 
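	/* the display controller scans out physically contiguous VRAM */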
1912 	abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1913 	r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1914 	if (unlikely(r != 0)) {
1915 		amdgpu_bo_unreserve(abo);
1916 		return -EINVAL;
1917 	}
1918 	fb_location = amdgpu_bo_gpu_offset(abo);
1919 
1920 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1921 	amdgpu_bo_unreserve(abo);
1922 
1923 	switch (target_fb->format->format) {
1924 	case DRM_FORMAT_C8:
1925 		fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1926 			     (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1927 		break;
1928 	case DRM_FORMAT_XRGB4444:
1929 	case DRM_FORMAT_ARGB4444:
1930 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1931 			     (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1932 #ifdef __BIG_ENDIAN
1933 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1934 #endif
1935 		break;
1936 	case DRM_FORMAT_XRGB1555:
1937 	case DRM_FORMAT_ARGB1555:
1938 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1939 			     (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1940 #ifdef __BIG_ENDIAN
1941 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1942 #endif
1943 		break;
1944 	case DRM_FORMAT_BGRX5551:
1945 	case DRM_FORMAT_BGRA5551:
1946 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1947 			     (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1948 #ifdef __BIG_ENDIAN
1949 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1950 #endif
1951 		break;
1952 	case DRM_FORMAT_RGB565:
1953 		fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1954 			     (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1955 #ifdef __BIG_ENDIAN
1956 		fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1957 #endif
1958 		break;
1959 	case DRM_FORMAT_XRGB8888:
1960 	case DRM_FORMAT_ARGB8888:
1961 		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1962 			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1963 #ifdef __BIG_ENDIAN
1964 		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1965 #endif
1966 		break;
1967 	case DRM_FORMAT_XRGB2101010:
1968 	case DRM_FORMAT_ARGB2101010:
1969 		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1970 			     (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1971 #ifdef __BIG_ENDIAN
1972 		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1973 #endif
1974 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1975 		bypass_lut = true;
1976 		break;
1977 	case DRM_FORMAT_BGRX1010102:
1978 	case DRM_FORMAT_BGRA1010102:
1979 		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1980 			     (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1981 #ifdef __BIG_ENDIAN
1982 		fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1983 #endif
1984 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1985 		bypass_lut = true;
1986 		break;
1987 	case DRM_FORMAT_XBGR8888:
1988 	case DRM_FORMAT_ABGR8888:
1989 		fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1990 			     (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1991 		fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
1992 			   (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
1993 #ifdef __BIG_ENDIAN
1994 		fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1995 #endif
1996 		break;
1997 	default:
1998 		DRM_ERROR("Unsupported screen format %p4cc\n",
1999 			  &target_fb->format->format);
2000 		return -EINVAL;
2001 	}
2002 
2003 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2004 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2005 
2006 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2007 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2008 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2009 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2010 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2011 
2012 		fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
2013 		fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2014 		fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
2015 		fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
2016 		fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
2017 		fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
2018 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2019 		fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2020 	}
2021 
2022 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2023 	fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
2024 
2025 	dce_v6_0_vga_enable(crtc, false);
2026 
2027 	/* Make sure surface address is updated at vertical blank rather than
2028 	 * horizontal blank
2029 	 */
2030 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
2031 
2032 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2033 	       upper_32_bits(fb_location));
2034 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2035 	       upper_32_bits(fb_location));
2036 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2037 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2038 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2039 	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2040 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2041 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2042 
2043 	/*
2044 	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2045 	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2046 	 * retain the full precision throughout the pipeline.
2047 	 */
2048 	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
2049 		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
2050 		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
2051 
2052 	if (bypass_lut)
2053 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2054 
2055 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2056 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2057 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2058 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2059 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2060 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2061 
2062 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
2063 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2064 
2065 	dce_v6_0_grph_enable(crtc, true);
2066 
2067 	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2068 		       target_fb->height);
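	/* keep the viewport origin aligned: x to 4 pixels, y to 2 lines */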
2069 	x &= ~3;
2070 	y &= ~1;
2071 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2072 	       (x << 16) | y);
2073 	viewport_w = crtc->mode.hdisplay;
2074 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2075 
2076 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2077 	       (viewport_w << 16) | viewport_h);
2078 
2079 	/* set pageflip to happen anywhere in vblank interval */
2080 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2081 
2082 	if (fb && fb != crtc->primary->fb) {
2083 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2084 		r = amdgpu_bo_reserve(abo, true);
2085 		if (unlikely(r != 0))
2086 			return r;
2087 		amdgpu_bo_unpin(abo);
2088 		amdgpu_bo_unreserve(abo);
2089 	}
2090 
2091 	/* Bytes per pixel may have changed */
2092 	dce_v6_0_bandwidth_update(adev);
2093 
2094 	return 0;
2096 }
2097 
2098 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2099 				    struct drm_display_mode *mode)
2100 {
2101 	struct drm_device *dev = crtc->dev;
2102 	struct amdgpu_device *adev = drm_to_adev(dev);
2103 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2104 
2105 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2106 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2107 			DATA_FORMAT__INTERLEAVE_EN_MASK);
2108 	else
2109 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2110 }
2111 
2112 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2113 {
2114 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2115 	struct drm_device *dev = crtc->dev;
2116 	struct amdgpu_device *adev = drm_to_adev(dev);
2117 	u16 *r, *g, *b;
2118 	int i;
2119 
2120 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2121 
2122 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2123 	       ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2124 		(INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2125 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2126 	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2127 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2128 	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2129 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2130 	       ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2131 		(INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2132 
2133 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2134 
2135 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2136 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2137 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2138 
2139 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2140 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2141 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2142 
2143 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2144 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2145 
2146 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2147 	r = crtc->gamma_store;
2148 	g = r + crtc->gamma_size;
2149 	b = g + crtc->gamma_size;
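	/* pack each 16-bit gamma entry into the 10:10:10 DC_LUT_30_COLOR layout */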
2150 	for (i = 0; i < 256; i++) {
2151 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2152 		       ((*r++ & 0xffc0) << 14) |
2153 		       ((*g++ & 0xffc0) << 4) |
2154 		       (*b++ >> 6));
2155 	}
2156 
2157 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2158 	       ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2159 		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2160 		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) |
2161 		(DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2162 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2163 	       ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2164 		(GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2165 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2166 	       ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2167 		(REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2168 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2169 	       ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2170 		(OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2171 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2172 	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2175 }
2176 
2177 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2178 {
2179 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2180 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2181 
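	/* UNIPHY0-2 map link A/B to DIG0-5; UNIPHY3 always uses DIG6 */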
2182 	switch (amdgpu_encoder->encoder_id) {
2183 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2184 		return dig->linkb ? 1 : 0;
2185 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2186 		return dig->linkb ? 3 : 2;
2187 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2188 		return dig->linkb ? 5 : 4;
2189 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2190 		return 6;
2191 	default:
2192 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2193 		return 0;
2194 	}
2195 }
2196 
2197 /**
2198  * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2199  *
2200  * @crtc: drm crtc
2201  *
2202  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2203  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2204  * monitors a dedicated PPLL must be used.  If a particular board has
2205  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2206  * as there is no need to program the PLL itself.  If we are not able to
2207  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2208  * avoid messing up an existing monitor.
2211  */
2212 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2213 {
2214 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2215 	struct drm_device *dev = crtc->dev;
2216 	struct amdgpu_device *adev = drm_to_adev(dev);
2217 	u32 pll_in_use;
2218 	int pll;
2219 
2220 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2221 		if (adev->clock.dp_extclk)
2222 			/* skip PPLL programming if using ext clock */
2223 			return ATOM_PPLL_INVALID;
2224 		else
2225 			return ATOM_PPLL0;
2226 	} else {
2227 		/* use the same PPLL for all monitors with the same clock */
2228 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2229 		if (pll != ATOM_PPLL_INVALID)
2230 			return pll;
2231 	}
2232 
2233 	/* otherwise, pick whichever of PPLL1/PPLL2 is not in use */
2234 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2235 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2236 		return ATOM_PPLL2;
2237 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2238 		return ATOM_PPLL1;
2239 	DRM_ERROR("unable to allocate a PPLL\n");
2240 	return ATOM_PPLL_INVALID;
2241 }
2242 
2243 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2244 {
2245 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2246 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2247 	uint32_t cur_lock;
2248 
2249 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2250 	if (lock)
2251 		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2252 	else
2253 		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2254 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2255 }
2256 
2257 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2258 {
2259 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2260 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2261 
2262 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2263 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2264 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2265 }
2266 
2267 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2268 {
2269 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2270 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2271 
2272 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2273 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2274 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2275 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2276 
2277 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2278 	       CUR_CONTROL__CURSOR_EN_MASK |
2279 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2280 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2281 }
2282 
2283 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2284 				       int x, int y)
2285 {
2286 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2287 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2288 	int xorigin = 0, yorigin = 0;
2289 
2290 	int w = amdgpu_crtc->cursor_width;
2291 
2292 	amdgpu_crtc->cursor_x = x;
2293 	amdgpu_crtc->cursor_y = y;
2294 
2295 	/* avivo cursors are offset into the total surface */
2296 	x += crtc->x;
2297 	y += crtc->y;
2298 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2299 
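	/* the position registers are unsigned, so clamp negative coordinates
	 * by moving the cursor hotspot instead
	 */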
2300 	if (x < 0) {
2301 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2302 		x = 0;
2303 	}
2304 	if (y < 0) {
2305 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2306 		y = 0;
2307 	}
2308 
2309 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2310 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2311 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2312 	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2313 
2314 	return 0;
2315 }
2316 
2317 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2318 				     int x, int y)
2319 {
2320 	int ret;
2321 
2322 	dce_v6_0_lock_cursor(crtc, true);
2323 	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2324 	dce_v6_0_lock_cursor(crtc, false);
2325 
2326 	return ret;
2327 }
2328 
2329 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2330 				     struct drm_file *file_priv,
2331 				     uint32_t handle,
2332 				     uint32_t width,
2333 				     uint32_t height,
2334 				     int32_t hot_x,
2335 				     int32_t hot_y)
2336 {
2337 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2338 	struct drm_gem_object *obj;
2339 	struct amdgpu_bo *aobj;
2340 	int ret;
2341 
2342 	if (!handle) {
2343 		/* turn off cursor */
2344 		dce_v6_0_hide_cursor(crtc);
2345 		obj = NULL;
2346 		goto unpin;
2347 	}
2348 
2349 	if ((width > amdgpu_crtc->max_cursor_width) ||
2350 	    (height > amdgpu_crtc->max_cursor_height)) {
2351 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2352 		return -EINVAL;
2353 	}
2354 
2355 	obj = drm_gem_object_lookup(file_priv, handle);
2356 	if (!obj) {
2357 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2358 		return -ENOENT;
2359 	}
2360 
2361 	aobj = gem_to_amdgpu_bo(obj);
2362 	ret = amdgpu_bo_reserve(aobj, false);
2363 	if (ret != 0) {
2364 		drm_gem_object_put(obj);
2365 		return ret;
2366 	}
2367 
2368 	aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2369 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2370 	amdgpu_bo_unreserve(aobj);
2371 	if (ret) {
2372 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2373 		drm_gem_object_put(obj);
2374 		return ret;
2375 	}
2376 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2377 
2378 	dce_v6_0_lock_cursor(crtc, true);
2379 
2380 	if (width != amdgpu_crtc->cursor_width ||
2381 	    height != amdgpu_crtc->cursor_height ||
2382 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2383 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2384 		int x, y;
2385 
2386 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2387 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2388 
2389 		dce_v6_0_cursor_move_locked(crtc, x, y);
2390 
2391 		amdgpu_crtc->cursor_width = width;
2392 		amdgpu_crtc->cursor_height = height;
2393 		amdgpu_crtc->cursor_hot_x = hot_x;
2394 		amdgpu_crtc->cursor_hot_y = hot_y;
2395 	}
2396 
2397 	dce_v6_0_show_cursor(crtc);
2398 	dce_v6_0_lock_cursor(crtc, false);
2399 
2400 unpin:
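	/* unpin and drop the reference on the previous cursor BO, if any */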
2401 	if (amdgpu_crtc->cursor_bo) {
2402 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2403 		ret = amdgpu_bo_reserve(aobj, true);
2404 		if (likely(ret == 0)) {
2405 			amdgpu_bo_unpin(aobj);
2406 			amdgpu_bo_unreserve(aobj);
2407 		}
2408 		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2409 	}
2410 
2411 	amdgpu_crtc->cursor_bo = obj;
2412 	return 0;
2413 }
2414 
2415 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2416 {
2417 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2418 
2419 	if (amdgpu_crtc->cursor_bo) {
2420 		dce_v6_0_lock_cursor(crtc, true);
2421 
2422 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2423 					    amdgpu_crtc->cursor_y);
2424 
2425 		dce_v6_0_show_cursor(crtc);
2426 		dce_v6_0_lock_cursor(crtc, false);
2427 	}
2428 }
2429 
2430 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2431 				   u16 *blue, uint32_t size,
2432 				   struct drm_modeset_acquire_ctx *ctx)
2433 {
2434 	dce_v6_0_crtc_load_lut(crtc);
2435 
2436 	return 0;
2437 }
2438 
2439 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2440 {
2441 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2442 
2443 	drm_crtc_cleanup(crtc);
2444 	kfree(amdgpu_crtc);
2445 }
2446 
2447 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2448 	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2449 	.cursor_move = dce_v6_0_crtc_cursor_move,
2450 	.gamma_set = dce_v6_0_crtc_gamma_set,
2451 	.set_config = amdgpu_display_crtc_set_config,
2452 	.destroy = dce_v6_0_crtc_destroy,
2453 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2454 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2455 	.enable_vblank = amdgpu_enable_vblank_kms,
2456 	.disable_vblank = amdgpu_disable_vblank_kms,
2457 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2458 };
2459 
2460 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2461 {
2462 	struct drm_device *dev = crtc->dev;
2463 	struct amdgpu_device *adev = drm_to_adev(dev);
2464 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2465 	unsigned type;
2466 
2467 	switch (mode) {
2468 	case DRM_MODE_DPMS_ON:
2469 		amdgpu_crtc->enabled = true;
2470 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2471 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2472 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2473 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2474 						amdgpu_crtc->crtc_id);
2475 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2476 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2477 		drm_crtc_vblank_on(crtc);
2478 		dce_v6_0_crtc_load_lut(crtc);
2479 		break;
2480 	case DRM_MODE_DPMS_STANDBY:
2481 	case DRM_MODE_DPMS_SUSPEND:
2482 	case DRM_MODE_DPMS_OFF:
2483 		drm_crtc_vblank_off(crtc);
2484 		if (amdgpu_crtc->enabled)
2485 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2486 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2487 		amdgpu_crtc->enabled = false;
2488 		break;
2489 	}
2490 	/* adjust pm to dpms */
2491 	amdgpu_dpm_compute_clocks(adev);
2492 }
2493 
2494 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2495 {
2496 	/* disable crtc pair power gating before programming */
2497 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2498 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2499 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2500 }
2501 
2502 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2503 {
2504 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2505 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2506 }
2507 
2508 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2509 {
2511 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2512 	struct drm_device *dev = crtc->dev;
2513 	struct amdgpu_device *adev = drm_to_adev(dev);
2514 	struct amdgpu_atom_ss ss;
2515 	int i;
2516 
2517 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2518 	if (crtc->primary->fb) {
2519 		int r;
2520 		struct amdgpu_bo *abo;
2521 
2522 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2523 		r = amdgpu_bo_reserve(abo, true);
2524 		if (unlikely(r))
2525 			DRM_ERROR("failed to reserve abo before unpin\n");
2526 		else {
2527 			amdgpu_bo_unpin(abo);
2528 			amdgpu_bo_unreserve(abo);
2529 		}
2530 	}
2531 	/* disable the GRPH */
2532 	dce_v6_0_grph_enable(crtc, false);
2533 
2534 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2535 
2536 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2537 		if (adev->mode_info.crtcs[i] &&
2538 		    adev->mode_info.crtcs[i]->enabled &&
2539 		    i != amdgpu_crtc->crtc_id &&
2540 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2541 			/* another crtc is using this pll, don't turn
2542 			 * off the pll
2543 			 */
2544 			goto done;
2545 		}
2546 	}
2547 
2548 	switch (amdgpu_crtc->pll_id) {
2549 	case ATOM_PPLL1:
2550 	case ATOM_PPLL2:
2551 		/* disable the ppll */
2552 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2553 						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2554 		break;
2555 	default:
2556 		break;
2557 	}
2558 done:
2559 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2560 	amdgpu_crtc->adjusted_clock = 0;
2561 	amdgpu_crtc->encoder = NULL;
2562 	amdgpu_crtc->connector = NULL;
2563 }
2564 
2565 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2566 				  struct drm_display_mode *mode,
2567 				  struct drm_display_mode *adjusted_mode,
2568 				  int x, int y, struct drm_framebuffer *old_fb)
2569 {
2570 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2571 
2572 	if (!amdgpu_crtc->adjusted_clock)
2573 		return -EINVAL;
2574 
2575 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2576 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2577 	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y);
2578 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2579 	amdgpu_atombios_crtc_scaler_setup(crtc);
2580 	dce_v6_0_cursor_reset(crtc);
2581 	/* update the hw version for dpm */
2582 	amdgpu_crtc->hw_mode = *adjusted_mode;
2583 
2584 	return 0;
2585 }
2586 
2587 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2588 				     const struct drm_display_mode *mode,
2589 				     struct drm_display_mode *adjusted_mode)
2590 {
2591 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2592 	struct drm_device *dev = crtc->dev;
2593 	struct drm_encoder *encoder;
2594 
2595 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2596 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2597 		if (encoder->crtc == crtc) {
2598 			amdgpu_crtc->encoder = encoder;
2599 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2600 			break;
2601 		}
2602 	}
2603 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2604 		amdgpu_crtc->encoder = NULL;
2605 		amdgpu_crtc->connector = NULL;
2606 		return false;
2607 	}
2608 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2609 		return false;
2610 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2611 		return false;
2612 	/* pick pll */
2613 	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2614 	/* if we can't get a PPLL for a non-DP encoder, fail */
2615 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2616 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2617 		return false;
2618 
2619 	return true;
2620 }
2621 
2622 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2623 				  struct drm_framebuffer *old_fb)
2624 {
2625 	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y);
2626 }
2627 
2628 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2629 	.dpms = dce_v6_0_crtc_dpms,
2630 	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2631 	.mode_set = dce_v6_0_crtc_mode_set,
2632 	.mode_set_base = dce_v6_0_crtc_set_base,
2633 	.prepare = dce_v6_0_crtc_prepare,
2634 	.commit = dce_v6_0_crtc_commit,
2635 	.disable = dce_v6_0_crtc_disable,
2636 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2637 };
2638 
2639 static void dce_v6_0_panic_flush(struct drm_plane *plane)
2640 {
2641 	struct drm_framebuffer *fb;
2642 	struct amdgpu_crtc *amdgpu_crtc;
2643 	struct amdgpu_device *adev;
2644 	uint32_t fb_format;
2645 
2646 	if (!plane->fb)
2647 		return;
2648 
2649 	fb = plane->fb;
2650 	amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
2651 	adev = drm_to_adev(fb->dev);
2652 
2653 	/* Disable DC tiling */
2654 	fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
2655 	fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
2656 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2658 }
2659 
2660 static const struct drm_plane_helper_funcs dce_v6_0_drm_primary_plane_helper_funcs = {
2661 	.get_scanout_buffer = amdgpu_display_get_scanout_buffer,
2662 	.panic_flush = dce_v6_0_panic_flush,
2663 };
2664 
2665 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2666 {
2667 	struct amdgpu_crtc *amdgpu_crtc;
2668 
2669 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2670 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2671 	if (amdgpu_crtc == NULL)
2672 		return -ENOMEM;
2673 
2674 	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2675 
2676 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2677 	amdgpu_crtc->crtc_id = index;
2678 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2679 
2680 	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2681 	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2682 	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2683 	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2684 
2685 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2686 
2687 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2688 	amdgpu_crtc->adjusted_clock = 0;
2689 	amdgpu_crtc->encoder = NULL;
2690 	amdgpu_crtc->connector = NULL;
2691 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2692 	drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v6_0_drm_primary_plane_helper_funcs);
2693 
2694 	return 0;
2695 }
2696 
2697 static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
2698 {
2699 	struct amdgpu_device *adev = ip_block->adev;
2700 
2701 	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2702 	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2703 
2704 	dce_v6_0_set_display_funcs(adev);
2705 
2706 	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2707 
2708 	switch (adev->asic_type) {
2709 	case CHIP_TAHITI:
2710 	case CHIP_PITCAIRN:
2711 	case CHIP_VERDE:
2712 		adev->mode_info.num_hpd = 6;
2713 		adev->mode_info.num_dig = 6;
2714 		break;
2715 	case CHIP_OLAND:
2716 		adev->mode_info.num_hpd = 2;
2717 		adev->mode_info.num_dig = 2;
2718 		break;
2719 	default:
2720 		return -EINVAL;
2721 	}
2722 
2723 	dce_v6_0_set_irq_funcs(adev);
2724 
2725 	return 0;
2726 }
2727 
2728 static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
2729 {
2730 	int r, i;
2731 	struct amdgpu_device *adev = ip_block->adev;
2732 
2733 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2734 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2735 		if (r)
2736 			return r;
2737 	}
2738 
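	/* one pageflip interrupt source per crtc: src_ids 8, 10, ..., 18 */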
2739 	for (i = 8; i < 20; i += 2) {
2740 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2741 		if (r)
2742 			return r;
2743 	}
2744 
2745 	/* HPD hotplug */
2746 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2747 	if (r)
2748 		return r;
2749 
2750 	adev->mode_info.mode_config_initialized = true;
2751 
2752 	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2753 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2754 	adev_to_drm(adev)->mode_config.max_width = 16384;
2755 	adev_to_drm(adev)->mode_config.max_height = 16384;
2756 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2757 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2758 	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2759 
2760 	r = amdgpu_display_modeset_create_props(adev);
2761 	if (r)
2762 		return r;
2766 
2767 	/* allocate crtcs */
2768 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2769 		r = dce_v6_0_crtc_init(adev, i);
2770 		if (r)
2771 			return r;
2772 	}
2773 
2774 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2775 		amdgpu_display_print_display_setup(adev_to_drm(adev));
2776 	else
2777 		return -EINVAL;
2778 
2779 	/* setup afmt */
2780 	r = dce_v6_0_afmt_init(adev);
2781 	if (r)
2782 		return r;
2783 
2784 	r = dce_v6_0_audio_init(adev);
2785 	if (r)
2786 		return r;
2787 
2788 	/* Disable vblank IRQs aggressively for power-saving */
2789 	/* XXX: can this be enabled for DC? */
2790 	adev_to_drm(adev)->vblank_disable_immediate = true;
2791 
2792 	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2793 	if (r)
2794 		return r;
2795 
2796 	/* Pre-DCE11 */
2797 	INIT_DELAYED_WORK(&adev->hotplug_work,
2798 		  amdgpu_display_hotplug_work_func);
2799 
2800 	drm_kms_helper_poll_init(adev_to_drm(adev));
2801 
2802 	return r;
2803 }
2804 
2805 static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
2806 {
2807 	struct amdgpu_device *adev = ip_block->adev;
2808 
2809 	drm_edid_free(adev->mode_info.bios_hardcoded_edid);
2810 
2811 	drm_kms_helper_poll_fini(adev_to_drm(adev));
2812 
2813 	dce_v6_0_audio_fini(adev);
2814 	dce_v6_0_afmt_fini(adev);
2815 
2816 	drm_mode_config_cleanup(adev_to_drm(adev));
2817 	adev->mode_info.mode_config_initialized = false;
2818 
2819 	return 0;
2820 }
2821 
2822 static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
2823 {
2824 	int i;
2825 	struct amdgpu_device *adev = ip_block->adev;
2826 
2827 	/* disable vga render */
2828 	dce_v6_0_set_vga_render_state(adev, false);
2829 	/* init dig PHYs, disp eng pll */
2830 	amdgpu_atombios_encoder_init_dig(adev);
2831 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2832 
2833 	/* initialize hpd */
2834 	dce_v6_0_hpd_init(adev);
2835 
2836 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2837 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2838 	}
2839 
2840 	dce_v6_0_pageflip_interrupt_init(adev);
2841 
2842 	return 0;
2843 }
2844 
2845 static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
2846 {
2847 	int i;
2848 	struct amdgpu_device *adev = ip_block->adev;
2849 
2850 	dce_v6_0_hpd_fini(adev);
2851 
2852 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2853 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2854 	}
2855 
2856 	dce_v6_0_pageflip_interrupt_fini(adev);
2857 
2858 	flush_delayed_work(&adev->hotplug_work);
2859 
2860 	return 0;
2861 }
2862 
2863 static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block)
2864 {
2865 	struct amdgpu_device *adev = ip_block->adev;
2866 	int r;
2867 
2868 	r = amdgpu_display_suspend_helper(adev);
2869 	if (r)
2870 		return r;
2871 	adev->mode_info.bl_level =
2872 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2873 
2874 	return dce_v6_0_hw_fini(ip_block);
2875 }
2876 
2877 static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
2878 {
2879 	struct amdgpu_device *adev = ip_block->adev;
2880 	int ret;
2881 
2882 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2883 							   adev->mode_info.bl_level);
2884 
2885 	ret = dce_v6_0_hw_init(ip_block);
2886 
2887 	/* turn on the BL */
2888 	if (adev->mode_info.bl_encoder) {
2889 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2890 								  adev->mode_info.bl_encoder);
2891 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2892 						    bl_level);
2893 	}
2894 	if (ret)
2895 		return ret;
2896 
2897 	return amdgpu_display_resume_helper(adev);
2898 }
2899 
2900 static bool dce_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
2901 {
2902 	return true;
2903 }
2904 
2905 static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
2906 {
2907 	u32 srbm_soft_reset = 0, tmp;
2908 	struct amdgpu_device *adev = ip_block->adev;
2909 
2910 	if (dce_v6_0_is_display_hung(adev))
2911 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2912 
2913 	if (srbm_soft_reset) {
2914 		tmp = RREG32(mmSRBM_SOFT_RESET);
2915 		tmp |= srbm_soft_reset;
2916 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2917 		WREG32(mmSRBM_SOFT_RESET, tmp);
2918 		tmp = RREG32(mmSRBM_SOFT_RESET);
2919 
2920 		udelay(50);
2921 
2922 		tmp &= ~srbm_soft_reset;
2923 		WREG32(mmSRBM_SOFT_RESET, tmp);
2924 		tmp = RREG32(mmSRBM_SOFT_RESET);
2925 
2926 		/* Wait a little for things to settle down */
2927 		udelay(50);
2928 	}
2929 	return 0;
2930 }
2931 
2932 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2933 						     int crtc,
2934 						     enum amdgpu_interrupt_state state)
2935 {
2936 	u32 reg_block, interrupt_mask;
2937 
2938 	if (crtc >= adev->mode_info.num_crtc) {
2939 		DRM_DEBUG("invalid crtc %d\n", crtc);
2940 		return;
2941 	}
2942 
2943 	reg_block = crtc_offsets[crtc];
2966 
2967 	switch (state) {
2968 	case AMDGPU_IRQ_STATE_DISABLE:
2969 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2970 		interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK;
2971 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2972 		break;
2973 	case AMDGPU_IRQ_STATE_ENABLE:
2974 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2975 		interrupt_mask |= INT_MASK__VBLANK_INT_MASK;
2976 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2977 		break;
2978 	default:
2979 		break;
2980 	}
2981 }
2982 
2983 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2984 						    int crtc,
2985 						    enum amdgpu_interrupt_state state)
2986 {
2987 
2988 }
2989 
2990 static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev,
2991 					    struct amdgpu_irq_src *src,
2992 					    unsigned hpd,
2993 					    enum amdgpu_interrupt_state state)
2994 {
2995 	u32 dc_hpd_int_cntl;
2996 
2997 	if (hpd >= adev->mode_info.num_hpd) {
2998 		DRM_DEBUG("invalid hpd %d\n", hpd);
2999 		return 0;
3000 	}
3001 
3002 	switch (state) {
3003 	case AMDGPU_IRQ_STATE_DISABLE:
3004 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3005 		dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3006 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
3007 		break;
3008 	case AMDGPU_IRQ_STATE_ENABLE:
3009 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3010 		dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3011 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
3012 		break;
3013 	default:
3014 		break;
3015 	}
3016 
3017 	return 0;
3018 }
3019 
3020 static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev,
3021 					     struct amdgpu_irq_src *src,
3022 					     unsigned type,
3023 					     enum amdgpu_interrupt_state state)
3024 {
3025 	switch (type) {
3026 	case AMDGPU_CRTC_IRQ_VBLANK1:
3027 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3028 		break;
3029 	case AMDGPU_CRTC_IRQ_VBLANK2:
3030 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3031 		break;
3032 	case AMDGPU_CRTC_IRQ_VBLANK3:
3033 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3034 		break;
3035 	case AMDGPU_CRTC_IRQ_VBLANK4:
3036 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3037 		break;
3038 	case AMDGPU_CRTC_IRQ_VBLANK5:
3039 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3040 		break;
3041 	case AMDGPU_CRTC_IRQ_VBLANK6:
3042 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3043 		break;
3044 	case AMDGPU_CRTC_IRQ_VLINE1:
3045 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
3046 		break;
3047 	case AMDGPU_CRTC_IRQ_VLINE2:
3048 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
3049 		break;
3050 	case AMDGPU_CRTC_IRQ_VLINE3:
3051 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
3052 		break;
3053 	case AMDGPU_CRTC_IRQ_VLINE4:
3054 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
3055 		break;
3056 	case AMDGPU_CRTC_IRQ_VLINE5:
3057 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
3058 		break;
3059 	case AMDGPU_CRTC_IRQ_VLINE6:
3060 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
3061 		break;
3062 	default:
3063 		break;
3064 	}
3065 	return 0;
3066 }
3067 
3068 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
3069 			     struct amdgpu_irq_src *source,
3070 			     struct amdgpu_iv_entry *entry)
3071 {
3072 	unsigned crtc = entry->src_id - 1;
3073 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3074 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3075 								    crtc);
3076 
3077 	switch (entry->src_data[0]) {
3078 	case 0: /* vblank */
3079 		if (disp_int & interrupt_status_offsets[crtc].vblank)
3080 			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK);
3081 		else
3082 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3083 
3084 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3085 			drm_handle_vblank(adev_to_drm(adev), crtc);
3086 		}
3087 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3088 		break;
3089 	case 1: /* vline */
3090 		if (disp_int & interrupt_status_offsets[crtc].vline)
3091 			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK);
3092 		else
3093 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3094 
3095 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3096 		break;
3097 	default:
3098 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3099 		break;
3100 	}
3101 
3102 	return 0;
3103 }
3104 
3105 static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3106 						 struct amdgpu_irq_src *src,
3107 						 unsigned type,
3108 						 enum amdgpu_interrupt_state state)
3109 {
3110 	u32 reg;
3111 
3112 	if (type >= adev->mode_info.num_crtc) {
3113 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3114 		return -EINVAL;
3115 	}
3116 
3117 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3118 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3119 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3120 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3121 	else
3122 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3123 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3124 
3125 	return 0;
3126 }
3127 
3128 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
3129 				 struct amdgpu_irq_src *source,
3130 				 struct amdgpu_iv_entry *entry)
3131 {
3132 	unsigned long flags;
3133 	unsigned crtc_id;
3134 	struct amdgpu_crtc *amdgpu_crtc;
3135 	struct amdgpu_flip_work *works;
3136 
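	/* pageflip src_ids start at 8 and come two apart per crtc (see sw_init) */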
3137 	crtc_id = (entry->src_id - 8) >> 1;
3138 
3139 	if (crtc_id >= adev->mode_info.num_crtc) {
3140 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3141 		return -EINVAL;
3142 	}
3143 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3144 
3145 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3146 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3147 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3148 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3149 
3150 	/* IRQ could occur when in initial stage */
3151 	if (amdgpu_crtc == NULL)
3152 		return 0;
3153 
3154 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3155 	works = amdgpu_crtc->pflip_works;
3156 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3157 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d)\n",
3158 				 amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED);
3161 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3162 		return 0;
3163 	}
3164 
3165 	/* page flip completed. clean up */
3166 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3167 	amdgpu_crtc->pflip_works = NULL;
3168 
3169 	/* wake up userspace */
3170 	if (works->event)
3171 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3172 
3173 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3174 
3175 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3176 	schedule_work(&works->unpin_work);
3177 
3178 	return 0;
3179 }
3180 
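/**
 * dce_v6_0_hpd_irq - hotplug detect interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this handler was registered for
 * @entry: decoded interrupt vector entry
 *
 * Acks the asserted HPD interrupt and schedules the deferred hotplug
 * work to rescan the connectors.
 *
 * Returns 0, the event is always consumed.
 */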
3181 static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3182 			    struct amdgpu_irq_src *source,
3183 			    struct amdgpu_iv_entry *entry)
3184 {
3185 	uint32_t disp_int, mask;
3186 	unsigned int hpd;
3187 
3188 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3189 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3190 		return 0;
3191 	}
3192 
3193 	hpd = entry->src_data[0];
3194 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3195 	mask = interrupt_status_offsets[hpd].hpd;
3196 
3197 	if (disp_int & mask) {
3198 		dce_v6_0_hpd_int_ack(adev, hpd);
3199 		schedule_delayed_work(&adev->hotplug_work, 0);
3200 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3201 	}
3202 
3203 	return 0;
3204 }
3205 
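/*
 * No display clock or power gating is implemented in this code path;
 * the two stubs below only satisfy the amd_ip_funcs interface.
 */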
3206 static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
3207 					  enum amd_clockgating_state state)
3208 {
3209 	return 0;
3210 }
3211 
3212 static int dce_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
3213 					  enum amd_powergating_state state)
3214 {
3215 	return 0;
3216 }
3217 
3218 static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3219 	.name = "dce_v6_0",
3220 	.early_init = dce_v6_0_early_init,
3221 	.sw_init = dce_v6_0_sw_init,
3222 	.sw_fini = dce_v6_0_sw_fini,
3223 	.hw_init = dce_v6_0_hw_init,
3224 	.hw_fini = dce_v6_0_hw_fini,
3225 	.suspend = dce_v6_0_suspend,
3226 	.resume = dce_v6_0_resume,
3227 	.is_idle = dce_v6_0_is_idle,
3228 	.soft_reset = dce_v6_0_soft_reset,
3229 	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3230 	.set_powergating_state = dce_v6_0_set_powergating_state,
3231 };
3232 
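/**
 * dce_v6_0_encoder_mode_set - encoder mode_set helper
 *
 * @encoder: encoder being programmed
 * @mode: requested mode
 * @adjusted_mode: mode after fixup, used for the actual timing
 *
 * Caches the pixel clock, powers the encoder down (the atombios DPMS
 * table needs CRTC information that is only known here), restores the
 * interleave setting that the scaler setup can clear, and brings up
 * AFMT for HDMI and DP modes.
 */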
3233 static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3234 				      struct drm_display_mode *mode,
3235 				      struct drm_display_mode *adjusted_mode)
3236 {
3237 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3238 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3239 
3240 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3241 
3242 	/* need to call this here rather than in prepare() since we need some crtc info */
3243 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3244 
3245 	/* set scaler clears this on some chips */
3246 	dce_v6_0_set_interleave(encoder->crtc, mode);
3247 
3248 	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3249 		dce_v6_0_afmt_enable(encoder, true);
3250 		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3251 	}
3252 }
3253 
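/**
 * dce_v6_0_encoder_prepare - encoder pre-modeset setup
 *
 * @encoder: encoder about to be programmed
 *
 * Assigns a DIG encoder (and an AFMT block for DFP devices) to
 * digital outputs, locks the atombios scratch registers, selects the
 * i2c router port where present, powers up eDP panels, points the
 * encoder at its CRTC source and programs the FMT block.
 */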
3254 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3255 {
3256 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3257 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3258 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3259 
3260 	if ((amdgpu_encoder->active_device &
3261 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3262 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3263 	     ENCODER_OBJECT_ID_NONE)) {
3264 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3265 		if (dig) {
3266 			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3267 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3268 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3269 		}
3270 	}
3271 
3272 	amdgpu_atombios_scratch_regs_lock(adev, true);
3273 
3274 	if (connector) {
3275 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3276 
3277 		/* select the clock/data port if it uses a router */
3278 		if (amdgpu_connector->router.cd_valid)
3279 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3280 
3281 		/* turn eDP panel on for mode set */
3282 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3283 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3284 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3285 	}
3286 
3287 	/* this is needed for the pll/ss setup to work correctly in some cases */
3288 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3289 	/* set up the FMT blocks */
3290 	dce_v6_0_program_fmt(encoder);
3291 }
3292 
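/**
 * dce_v6_0_encoder_commit - encoder post-modeset enable
 *
 * @encoder: encoder to enable
 *
 * Powers the encoder back up now that the CRTC is configured and
 * drops the atombios scratch register lock taken in
 * dce_v6_0_encoder_prepare().
 */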
3293 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3294 {
3295 	struct drm_device *dev = encoder->dev;
3296 	struct amdgpu_device *adev = drm_to_adev(dev);
3297 
3298 	/* need to call this here as we need the crtc set up */
3299 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3300 	amdgpu_atombios_scratch_regs_lock(adev, false);
3301 }
3302 
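/**
 * dce_v6_0_encoder_disable - shut down an encoder
 *
 * @encoder: encoder to disable
 *
 * Powers the encoder down, tears down AFMT for HDMI/DP and releases
 * the DIG encoder assignment so it can be reused.
 */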
3303 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3304 {
3305 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3306 	struct amdgpu_encoder_atom_dig *dig;
3307 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3308 
3309 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3310 
3311 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3312 		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3313 			dce_v6_0_afmt_enable(encoder, false);
3314 		dig = amdgpu_encoder->enc_priv;
3315 		dig->dig_encoder = -1;
3316 	}
3317 	amdgpu_encoder->active_device = 0;
3318 }
3319 
3320 /* these are handled by the primary encoders */
3321 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3322 {
3323 
3324 }
3325 
3326 static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3327 {
3328 
3329 }
3330 
3331 static void dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3332 				  struct drm_display_mode *mode,
3333 				  struct drm_display_mode *adjusted_mode)
3334 {
3335 
3336 }
3337 
3338 static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3339 {
3340 
3341 }
3342 
3343 static void dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3344 {
3345 
3346 }
3347 
3348 static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3349 				    const struct drm_display_mode *mode,
3350 				    struct drm_display_mode *adjusted_mode)
3351 {
3352 	return true;
3353 }
3354 
3355 static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3356 	.dpms = dce_v6_0_ext_dpms,
3357 	.mode_fixup = dce_v6_0_ext_mode_fixup,
3358 	.prepare = dce_v6_0_ext_prepare,
3359 	.mode_set = dce_v6_0_ext_mode_set,
3360 	.commit = dce_v6_0_ext_commit,
3361 	.disable = dce_v6_0_ext_disable,
3362 	/* no detect for TMDS/LVDS yet */
3363 };
3364 
3365 static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3366 	.dpms = amdgpu_atombios_encoder_dpms,
3367 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3368 	.prepare = dce_v6_0_encoder_prepare,
3369 	.mode_set = dce_v6_0_encoder_mode_set,
3370 	.commit = dce_v6_0_encoder_commit,
3371 	.disable = dce_v6_0_encoder_disable,
3372 	.detect = amdgpu_atombios_encoder_dig_detect,
3373 };
3374 
3375 static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3376 	.dpms = amdgpu_atombios_encoder_dpms,
3377 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3378 	.prepare = dce_v6_0_encoder_prepare,
3379 	.mode_set = dce_v6_0_encoder_mode_set,
3380 	.commit = dce_v6_0_encoder_commit,
3381 	.detect = amdgpu_atombios_encoder_dac_detect,
3382 };
3383 
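/**
 * dce_v6_0_encoder_destroy - encoder destroy callback
 *
 * @encoder: encoder to free
 *
 * Unregisters the backlight for LCD devices and frees the encoder
 * private data together with the encoder itself.
 */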
3384 static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3385 {
3386 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3387 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3388 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3389 	kfree(amdgpu_encoder->enc_priv);
3390 	drm_encoder_cleanup(encoder);
3391 	kfree(amdgpu_encoder);
3392 }
3393 
3394 static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3395 	.destroy = dce_v6_0_encoder_destroy,
3396 };
3397 
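/**
 * dce_v6_0_encoder_add - register an encoder from the BIOS object table
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum
 * @supported_device: bitmask of devices the encoder can drive
 * @caps: encoder capability flags
 *
 * If an encoder with the same enum was already added, only ORs in the
 * newly supported devices.  Otherwise allocates a new amdgpu_encoder,
 * derives possible_crtcs from the CRTC count and binds the DAC, DIG
 * or external encoder helpers based on the encoder object id.
 */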
3398 static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3399 				 uint32_t encoder_enum,
3400 				 uint32_t supported_device,
3401 				 u16 caps)
3402 {
3403 	struct drm_device *dev = adev_to_drm(adev);
3404 	struct drm_encoder *encoder;
3405 	struct amdgpu_encoder *amdgpu_encoder;
3406 
3407 	/* see if we already added it */
3408 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3409 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3410 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3411 			amdgpu_encoder->devices |= supported_device;
3412 			return;
3413 		}
3414 	}
3415 
3416 	/* add a new one */
3417 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3418 	if (!amdgpu_encoder)
3419 		return;
3420 
3421 	encoder = &amdgpu_encoder->base;
3422 	switch (adev->mode_info.num_crtc) {
3423 	case 1:
3424 		encoder->possible_crtcs = 0x1;
3425 		break;
3426 	case 2:
3427 	default:
3428 		encoder->possible_crtcs = 0x3;
3429 		break;
3430 	case 4:
3431 		encoder->possible_crtcs = 0xf;
3432 		break;
3433 	case 6:
3434 		encoder->possible_crtcs = 0x3f;
3435 		break;
3436 	}
3437 
3438 	amdgpu_encoder->enc_priv = NULL;
3439 	amdgpu_encoder->encoder_enum = encoder_enum;
3440 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3441 	amdgpu_encoder->devices = supported_device;
3442 	amdgpu_encoder->rmx_type = RMX_OFF;
3443 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3444 	amdgpu_encoder->is_ext_encoder = false;
3445 	amdgpu_encoder->caps = caps;
3446 
3447 	switch (amdgpu_encoder->encoder_id) {
3448 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3449 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3450 		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3451 				 DRM_MODE_ENCODER_DAC, NULL);
3452 		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3453 		break;
3454 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3455 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3456 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3457 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3458 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3459 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3460 			amdgpu_encoder->rmx_type = RMX_FULL;
3461 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3462 					 DRM_MODE_ENCODER_LVDS, NULL);
3463 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3464 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3465 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3466 					 DRM_MODE_ENCODER_DAC, NULL);
3467 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3468 		} else {
3469 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3470 					 DRM_MODE_ENCODER_TMDS, NULL);
3471 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3472 		}
3473 		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3474 		break;
3475 	case ENCODER_OBJECT_ID_SI170B:
3476 	case ENCODER_OBJECT_ID_CH7303:
3477 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3478 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3479 	case ENCODER_OBJECT_ID_TITFP513:
3480 	case ENCODER_OBJECT_ID_VT1623:
3481 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3482 	case ENCODER_OBJECT_ID_TRAVIS:
3483 	case ENCODER_OBJECT_ID_NUTMEG:
3484 		/* these are handled by the primary encoders */
3485 		amdgpu_encoder->is_ext_encoder = true;
3486 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3487 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3488 					 DRM_MODE_ENCODER_LVDS, NULL);
3489 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3490 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3491 					 DRM_MODE_ENCODER_DAC, NULL);
3492 		else
3493 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3494 					 DRM_MODE_ENCODER_TMDS, NULL);
3495 		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3496 		break;
3497 	}
3498 }
3499 
3500 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3501 	.bandwidth_update = &dce_v6_0_bandwidth_update,
3502 	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3503 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3504 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3505 	.hpd_sense = &dce_v6_0_hpd_sense,
3506 	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3507 	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3508 	.page_flip = &dce_v6_0_page_flip,
3509 	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3510 	.add_encoder = &dce_v6_0_encoder_add,
3511 	.add_connector = &amdgpu_connector_add,
3512 };
3513 
3514 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3515 {
3516 	adev->mode_info.funcs = &dce_v6_0_display_funcs;
3517 }
3518 
3519 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3520 	.set = dce_v6_0_set_crtc_irq_state,
3521 	.process = dce_v6_0_crtc_irq,
3522 };
3523 
3524 static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3525 	.set = dce_v6_0_set_pageflip_irq_state,
3526 	.process = dce_v6_0_pageflip_irq,
3527 };
3528 
3529 static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3530 	.set = dce_v6_0_set_hpd_irq_state,
3531 	.process = dce_v6_0_hpd_irq,
3532 };
3533 
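/**
 * dce_v6_0_set_irq_funcs - register the DCE6 interrupt sources
 *
 * @adev: amdgpu_device pointer
 *
 * Wires up the set/process callbacks for the CRTC (vblank/vline),
 * pageflip and HPD interrupt sources and sizes each source from the
 * CRTC/HPD counts of the chip.
 */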
3534 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3535 {
3536 	if (adev->mode_info.num_crtc > 0)
3537 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3538 	else
3539 		adev->crtc_irq.num_types = 0;
3540 	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3541 
3542 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3543 	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3544 
3545 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3546 	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3547 }
3548 
3549 const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3550 {
3551 	.type = AMD_IP_BLOCK_TYPE_DCE,
3552 	.major = 6,
3553 	.minor = 0,
3554 	.rev = 0,
3555 	.funcs = &dce_v6_0_ip_funcs,
3556 };
3557 
3558 const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3559 {
3560 	.type = AMD_IP_BLOCK_TYPE_DCE,
3561 	.major = 6,
3562 	.minor = 4,
3563 	.rev = 0,
3564 	.funcs = &dce_v6_0_ip_funcs,
3565 };
3566