/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "vid.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "dce_v10_0.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "dce/dce_10_0_enum.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	CRTC6_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	DIG0_REGISTER_OFFSET,
	DIG1_REGISTER_OFFSET,
	DIG2_REGISTER_OFFSET,
	DIG3_REGISTER_OFFSET,
	DIG4_REGISTER_OFFSET,
	DIG5_REGISTER_OFFSET,
	DIG6_REGISTER_OFFSET
};

static const struct {
	uint32_t        reg;
	uint32_t        vblank;
	uint32_t        vline;
	uint32_t        hpd;
} interrupt_status_offsets[] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
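
/*
 * Each DISP_INTERRUPT_STATUS* register latches the vblank, vline and hpd
 * bits for one display pipe; the interrupt handlers later in this file
 * look the per-pipe register and masks up in this table, roughly:
 *
 *	u32 disp_int = RREG32(interrupt_status_offsets[crtc].reg);
 *
 *	if (disp_int & interrupt_status_offsets[crtc].vblank)
 *		... handle a vblank on this crtc ...
 */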

static const u32 golden_settings_tonga_a11[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};

static const u32 golden_settings_fiji_a10[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
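
/*
 * The golden/cg tables above are triples of (register, mask, value):
 * amdgpu_device_program_register_sequence() read-modify-writes each
 * register, replacing only the bits set in the mask.  A sketch of the
 * effect for one entry:
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~mask;
 *	tmp |= (value & mask);
 *	WREG32(reg, tmp);
 */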

static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	default:
		break;
	}
}

static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v10_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v10_0_page_flip(struct amdgpu_device *adev,
				int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	u32 tmp;

	/* flip at hsync for async, default is vsync */
	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* update the primary scanout address */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}
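
/*
 * Both returned values are packed pairs: CRTC_V_BLANK_START_END carries the
 * vblank start line in its low half and the vblank end line in its high
 * half, while CRTC_STATUS_POSITION carries the current vertical position in
 * the low half and the horizontal position in the high half.  The caller
 * (amdgpu_display_get_crtc_scanoutpos()) masks and unpacks them.
 */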

/**
 * dce_v10_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v10_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
	else
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}
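
/*
 * The hpd interrupt fires on the programmed polarity, so after sampling the
 * current sense state the function above arms the pin for the opposite
 * transition; that way both plug and unplug events raise an interrupt.
 */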

/**
 * dce_v10_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* Don't enable hpd on eDP or LVDS: it breaks the aux dp
			 * channel on iMacs; keeping it disabled helps (but does not
			 * completely fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_CONNECT_INT_DELAY,
				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_DISCONNECT_INT_DELAY,
				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v10_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
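
/*
 * Hang detection above: a crtc with CRTC_MASTER_EN set is considered hung
 * if its HV counter does not advance across 10 samples taken 100us apart
 * (roughly 1ms total), i.e. the display controller has stopped scanning
 * out.
 */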

static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
					   bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
		num_crtc = 6;
		break;
	default:
		num_crtc = 0;
	}
	return num_crtc;
}

void dce_v10_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and any enabled crtcs, if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v10_0_set_vga_render_state(adev, false);

		/* Disable the crtcs */
		for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
						     CRTC_CONTROL, CRTC_MASTER_EN);
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
		}
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
		}
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
		}
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/* display watermark setup */
/**
 * dce_v10_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i, mem_cfg;
	u32 pipe_offset = amdgpu_crtc->crtc_id;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			mem_cfg = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			mem_cfg = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		mem_cfg = 1;
		buffer_alloc = 0;
	}

	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (mem_cfg) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
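
/*
 * Worked example for the buckets above: a 1920-wide mode is not < 1920, so
 * it falls in the second bucket (mem_cfg = 2) and the function reports an
 * effective line buffer of 2560 * 2 pixels; a 1366-wide mode gets
 * mem_cfg = 1 and 1920 * 2 pixels.
 */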

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce10_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};

/**
 * dce_v10_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
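
/*
 * Example with the math above: yclk is in kHz, so yclk = 1000000 (a 1 GHz
 * effective dram clock) with 8 channels gives
 * 1000 MHz * (8 * 4) bytes * 0.7 = 22400 MBytes/s of raw dram bandwidth.
 */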

/**
 * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v10_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
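
/*
 * In other words: average bandwidth = src_width * bytes_per_pixel * vsc /
 * line_time, with line_time = (active + blank) time converted from ns to
 * us, which yields MBytes/s.  E.g. a 1920-wide, 4-byte-per-pixel surface
 * with vsc = 1 and a 14.8us line time averages roughly
 * 1920 * 4 / 14.8 ~= 519 MBytes/s.
 */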

/**
 * dce_v10_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
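
/*
 * Summary of the calculation above: the watermark is the total latency the
 * line buffer must hide (memory controller latency + the time the other
 * heads spend returning their chunks and cursor line pairs + display pipe
 * latency), plus a penalty whenever the line buffer cannot be refilled
 * within one active display period (line_fill_time > active_time).
 */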

/**
 * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	if (dce_v10_0_average_bandwidth(wm) <=
	    (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
{
	if (dce_v10_0_average_bandwidth(wm) <=
	    (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v10_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v10_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce10_wm_params wm_low, wm_high;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v10_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v10_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* select wm B */
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
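
/*
 * Note on the two register sets above: watermark A is loaded with the
 * high-clock values and watermark B with the low-clock values, then the
 * original URGENCY_WATERMARK_MASK selection is restored.  The line_time,
 * wm_high and wm_low copies stashed in amdgpu_crtc are the "values for
 * DPM" the comment above refers to.
 */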

/**
 * dce_v10_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v10_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp;
	int interlace = 0;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;
	if (connector->latency_present[interlace]) {
		/* build up both fields in tmp; the second REG_SET_FIELD must
		 * start from tmp, not 0, or VIDEO_LIPSYNC gets thrown away */
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    DP_CONNECTION, 0);
	/* set HDMI mode */
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    HDMI_CONNECTION, 1);
	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, 5); /* stereo */
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}

static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
};

static int dce_v10_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = 7;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio.  it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1491 }
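
/*
 * Background on the values programmed above: for HDMI audio clock
 * regeneration the sink recovers the audio sample rate from
 * 128 * fs = pixel_clock * N / CTS, so each supported pixel clock needs a
 * matching (N, CTS) pair per base sample rate (32, 44.1 and 48 kHz);
 * amdgpu_afmt_acr() looks those pairs up.
 */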

/*
 * build a HDMI Video Info Frame
 */
static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;
	u32 tmp;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (a quotient of two integers): DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
			    amdgpu_crtc->crtc_id);
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
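
/*
 * Example: for a 1080p60 pixel clock (clock = 148500, in kHz like
 * dto_phase) the DTO is programmed with the ratio 24000/148500, i.e. the
 * 24 MHz audio reference expressed against the 148.5 MHz pixel clock.
 */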

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	ssize_t err;
	u32 tmp;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

1567 	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1568 	if (encoder->crtc) {
1569 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1570 		bpc = amdgpu_crtc->bpc;
1571 	}
1572 
1573 	/* disable audio prior to setting up hw */
1574 	dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
1575 	dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1576 
1577 	dce_v10_0_audio_set_dto(encoder, mode->clock);
1578 
1579 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1580 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1581 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
1582 
1583 	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
1584 
1585 	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
1586 	switch (bpc) {
1587 	case 0:
1588 	case 6:
1589 	case 8:
1590 	case 16:
1591 	default:
1592 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
1593 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
1594 		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1595 			  connector->name, bpc);
1596 		break;
1597 	case 10:
1598 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1599 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
1600 		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1601 			  connector->name);
1602 		break;
1603 	case 12:
1604 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1605 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
1606 		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1607 			  connector->name);
1608 		break;
1609 	}
1610 	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
1611 
1612 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1613 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
1614 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
1615 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
1616 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1617 
1618 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1619 	/* enable audio info frames (frames won't be set until audio is enabled) */
1620 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1621 	/* required for audio info values to be updated */
1622 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1623 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1624 
1625 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1626 	/* required for audio info values to be updated */
1627 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1628 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1629 
1630 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1631 	/* anything other than 0 */
1632 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
1633 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1634 
1635 	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
1636 
1637 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1638 	/* set the default audio delay */
1639 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	/* should be sufficient for all audio modes and small enough for all hblanks */
1641 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1642 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1643 
1644 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1645 	/* allow 60958 channel status fields to be updated */
1646 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1647 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1648 
1649 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1650 	if (bpc > 8)
1651 		/* clear SW CTS value */
1652 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
1653 	else
1654 		/* select SW CTS value */
1655 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
	/* allow hw to send ACR packets when required */
1657 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1658 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1659 
1660 	dce_v10_0_afmt_update_ACR(encoder, mode->clock);
1661 
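	/* IEC 60958 channel status: number the channels 1-8 (a channel
	 * number of 0 would mean "do not take into account").
	 */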
1662 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1663 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1664 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1665 
1666 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1667 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1668 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1669 
1670 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1671 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1672 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1673 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1674 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1675 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1676 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1677 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1678 
1679 	dce_v10_0_audio_write_speaker_allocation(encoder);
1680 
1681 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
1682 	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1683 
1684 	dce_v10_0_afmt_audio_select_pin(encoder);
1685 	dce_v10_0_audio_write_sad_regs(encoder);
1686 	dce_v10_0_audio_write_latency_fields(encoder, mode);
1687 
1688 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1689 	if (err < 0) {
1690 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1691 		return;
1692 	}
1693 
1694 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1695 	if (err < 0) {
1696 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1697 		return;
1698 	}
1699 
1700 	dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1701 
1702 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1703 	/* enable AVI info frames */
1704 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1705 	/* required for audio info values to be updated */
1706 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1707 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1708 
1709 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1710 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1711 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1712 
1713 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1714 	/* send audio packets */
1715 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1716 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1717 
1718 	WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
1719 	WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
1720 	WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
1721 	WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
1722 
	/* enable audio after setting up hw */
1724 	dce_v10_0_audio_enable(adev, dig->afmt->pin, true);
1725 }
1726 
1727 static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1728 {
1729 	struct drm_device *dev = encoder->dev;
1730 	struct amdgpu_device *adev = dev->dev_private;
1731 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1732 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1733 
1734 	if (!dig || !dig->afmt)
1735 		return;
1736 
1737 	/* Silent, r600_hdmi_enable will raise WARN for us */
1738 	if (enable && dig->afmt->enabled)
1739 		return;
1740 	if (!enable && !dig->afmt->enabled)
1741 		return;
1742 
1743 	if (!enable && dig->afmt->pin) {
1744 		dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1745 		dig->afmt->pin = NULL;
1746 	}
1747 
1748 	dig->afmt->enabled = enable;
1749 
1750 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1751 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1752 }
1753 
1754 static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
1755 {
1756 	int i;
1757 
1758 	for (i = 0; i < adev->mode_info.num_dig; i++)
1759 		adev->mode_info.afmt[i] = NULL;
1760 
1761 	/* DCE10 has audio blocks tied to DIG encoders */
1762 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1763 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1764 		if (adev->mode_info.afmt[i]) {
1765 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1766 			adev->mode_info.afmt[i]->id = i;
1767 		} else {
1768 			int j;
1769 			for (j = 0; j < i; j++) {
1770 				kfree(adev->mode_info.afmt[j]);
1771 				adev->mode_info.afmt[j] = NULL;
1772 			}
1773 			return -ENOMEM;
1774 		}
1775 	}
1776 	return 0;
1777 }
1778 
1779 static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
1780 {
1781 	int i;
1782 
1783 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1784 		kfree(adev->mode_info.afmt[i]);
1785 		adev->mode_info.afmt[i] = NULL;
1786 	}
1787 }
1788 
1789 static const u32 vga_control_regs[6] =
1790 {
1791 	mmD1VGA_CONTROL,
1792 	mmD2VGA_CONTROL,
1793 	mmD3VGA_CONTROL,
1794 	mmD4VGA_CONTROL,
1795 	mmD5VGA_CONTROL,
1796 	mmD6VGA_CONTROL,
1797 };
1798 
1799 static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
1800 {
1801 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1802 	struct drm_device *dev = crtc->dev;
1803 	struct amdgpu_device *adev = dev->dev_private;
1804 	u32 vga_control;
1805 
1806 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1807 	if (enable)
1808 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1809 	else
1810 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1811 }
1812 
1813 static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
1814 {
1815 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1816 	struct drm_device *dev = crtc->dev;
1817 	struct amdgpu_device *adev = dev->dev_private;
1818 
1819 	if (enable)
1820 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1821 	else
1822 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1823 }
1824 
1825 static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
1826 				     struct drm_framebuffer *fb,
1827 				     int x, int y, int atomic)
1828 {
1829 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1830 	struct drm_device *dev = crtc->dev;
1831 	struct amdgpu_device *adev = dev->dev_private;
1832 	struct drm_framebuffer *target_fb;
1833 	struct drm_gem_object *obj;
1834 	struct amdgpu_bo *abo;
1835 	uint64_t fb_location, tiling_flags;
1836 	uint32_t fb_format, fb_pitch_pixels;
1837 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
1838 	u32 pipe_config;
1839 	u32 tmp, viewport_w, viewport_h;
1840 	int r;
1841 	bool bypass_lut = false;
1842 	struct drm_format_name_buf format_name;
1843 
1844 	/* no fb bound */
1845 	if (!atomic && !crtc->primary->fb) {
1846 		DRM_DEBUG_KMS("No FB bound\n");
1847 		return 0;
1848 	}
1849 
1850 	if (atomic)
1851 		target_fb = fb;
1852 	else
1853 		target_fb = crtc->primary->fb;
1854 
1855 	/* If atomic, assume fb object is pinned & idle & fenced and
1856 	 * just update base pointers
1857 	 */
1858 	obj = target_fb->obj[0];
1859 	abo = gem_to_amdgpu_bo(obj);
1860 	r = amdgpu_bo_reserve(abo, false);
1861 	if (unlikely(r != 0))
1862 		return r;
1863 
1864 	if (!atomic) {
1865 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1866 		if (unlikely(r != 0)) {
1867 			amdgpu_bo_unreserve(abo);
1868 			return -EINVAL;
1869 		}
1870 	}
1871 	fb_location = amdgpu_bo_gpu_offset(abo);
1872 
1873 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1874 	amdgpu_bo_unreserve(abo);
1875 
1876 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1877 
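	/* GRPH_DEPTH selects the pixel size (0 = 8bpp, 1 = 16bpp,
	 * 2 = 32bpp) and GRPH_FORMAT the component layout within that
	 * size, as used by the cases below.
	 */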
1878 	switch (target_fb->format->format) {
1879 	case DRM_FORMAT_C8:
1880 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
1881 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1882 		break;
1883 	case DRM_FORMAT_XRGB4444:
1884 	case DRM_FORMAT_ARGB4444:
1885 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1886 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
1887 #ifdef __BIG_ENDIAN
1888 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1889 					ENDIAN_8IN16);
1890 #endif
1891 		break;
1892 	case DRM_FORMAT_XRGB1555:
1893 	case DRM_FORMAT_ARGB1555:
1894 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1895 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1896 #ifdef __BIG_ENDIAN
1897 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1898 					ENDIAN_8IN16);
1899 #endif
1900 		break;
1901 	case DRM_FORMAT_BGRX5551:
1902 	case DRM_FORMAT_BGRA5551:
1903 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1904 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
1905 #ifdef __BIG_ENDIAN
1906 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1907 					ENDIAN_8IN16);
1908 #endif
1909 		break;
1910 	case DRM_FORMAT_RGB565:
1911 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1912 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
1913 #ifdef __BIG_ENDIAN
1914 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1915 					ENDIAN_8IN16);
1916 #endif
1917 		break;
1918 	case DRM_FORMAT_XRGB8888:
1919 	case DRM_FORMAT_ARGB8888:
1920 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1921 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1922 #ifdef __BIG_ENDIAN
1923 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1924 					ENDIAN_8IN32);
1925 #endif
1926 		break;
1927 	case DRM_FORMAT_XRGB2101010:
1928 	case DRM_FORMAT_ARGB2101010:
1929 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1930 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
1931 #ifdef __BIG_ENDIAN
1932 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1933 					ENDIAN_8IN32);
1934 #endif
		/* Greater than 8 bpc fb needs to bypass the hw lut to retain precision */
1936 		bypass_lut = true;
1937 		break;
1938 	case DRM_FORMAT_BGRX1010102:
1939 	case DRM_FORMAT_BGRA1010102:
1940 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1941 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
1942 #ifdef __BIG_ENDIAN
1943 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1944 					ENDIAN_8IN32);
1945 #endif
		/* Greater than 8 bpc fb needs to bypass the hw lut to retain precision */
1947 		bypass_lut = true;
1948 		break;
1949 	case DRM_FORMAT_XBGR8888:
1950 	case DRM_FORMAT_ABGR8888:
1951 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1952 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1953 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
1954 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
1955 #ifdef __BIG_ENDIAN
1956 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1957 					ENDIAN_8IN32);
1958 #endif
1959 		break;
1960 	default:
1961 		DRM_ERROR("Unsupported screen format %s\n",
1962 		          drm_get_format_name(target_fb->format->format, &format_name));
1963 		return -EINVAL;
1964 	}
1965 
1966 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1967 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1968 
1969 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1970 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1971 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1972 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1973 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1974 
1975 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
1976 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
1977 					  ARRAY_2D_TILED_THIN1);
1978 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
1979 					  tile_split);
1980 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
1981 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
1982 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
1983 					  mtaspect);
1984 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
1985 					  ADDR_SURF_MICRO_TILING_DISPLAY);
1986 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1987 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
1988 					  ARRAY_1D_TILED_THIN1);
1989 	}
1990 
1991 	fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
1992 				  pipe_config);
1993 
1994 	dce_v10_0_vga_enable(crtc, false);
1995 
1996 	/* Make sure surface address is updated at vertical blank rather than
1997 	 * horizontal blank
1998 	 */
1999 	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2000 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
2001 			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
2002 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2003 
2004 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2005 	       upper_32_bits(fb_location));
2006 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2007 	       upper_32_bits(fb_location));
2008 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2009 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2010 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2011 	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2012 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2013 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2014 
2015 	/*
	 * The LUT only has 256 entries for indexing by an 8 bpc fb. Bypass the
	 * LUT for > 8 bpc scanout to avoid truncating the fb indices to their
	 * 8 MSBs and retain the full precision throughout the pipeline.
2019 	 */
2020 	tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
2021 	if (bypass_lut)
2022 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
2023 	else
2024 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
2025 	WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
2026 
2027 	if (bypass_lut)
2028 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2029 
2030 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2031 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2032 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2033 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2034 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2035 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2036 
2037 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
2038 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2039 
2040 	dce_v10_0_grph_enable(crtc, true);
2041 
2042 	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2043 	       target_fb->height);
2044 
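	/* the viewport start appears to require alignment: x to a
	 * multiple of 4 pixels, y to a multiple of 2 lines
	 */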
2045 	x &= ~3;
2046 	y &= ~1;
2047 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2048 	       (x << 16) | y);
2049 	viewport_w = crtc->mode.hdisplay;
2050 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2051 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2052 	       (viewport_w << 16) | viewport_h);
2053 
2054 	/* set pageflip to happen anywhere in vblank interval */
2055 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2056 
2057 	if (!atomic && fb && fb != crtc->primary->fb) {
2058 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2059 		r = amdgpu_bo_reserve(abo, true);
2060 		if (unlikely(r != 0))
2061 			return r;
2062 		amdgpu_bo_unpin(abo);
2063 		amdgpu_bo_unreserve(abo);
2064 	}
2065 
2066 	/* Bytes per pixel may have changed */
2067 	dce_v10_0_bandwidth_update(adev);
2068 
2069 	return 0;
2070 }
2071 
2072 static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
2073 				     struct drm_display_mode *mode)
2074 {
2075 	struct drm_device *dev = crtc->dev;
2076 	struct amdgpu_device *adev = dev->dev_private;
2077 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2078 	u32 tmp;
2079 
2080 	tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2081 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2082 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2083 	else
2084 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2085 	WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2086 }
2087 
2088 static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
2089 {
2090 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2091 	struct drm_device *dev = crtc->dev;
2092 	struct amdgpu_device *adev = dev->dev_private;
2093 	u16 *r, *g, *b;
2094 	int i;
2095 	u32 tmp;
2096 
2097 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2098 
2099 	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2100 	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
2101 	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0);
2102 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2103 
2104 	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
2105 	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
2106 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2107 
2108 	tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset);
2109 	tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1);
2110 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2111 
2112 	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2113 	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
2114 	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0);
2115 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2116 
2117 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2118 
2119 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2120 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2121 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2122 
2123 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2124 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2125 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2126 
2127 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2128 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2129 
2130 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2131 	r = crtc->gamma_store;
2132 	g = r + crtc->gamma_size;
2133 	b = g + crtc->gamma_size;
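	/* DC_LUT_30_COLOR packs 10 bits per component (red in bits
	 * 29:20, green in 19:10, blue in 9:0); keep the top 10 bits of
	 * each 16-bit gamma value.
	 */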
2134 	for (i = 0; i < 256; i++) {
2135 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2136 		       ((*r++ & 0xffc0) << 14) |
2137 		       ((*g++ & 0xffc0) << 4) |
2138 		       (*b++ >> 6));
2139 	}
2140 
2141 	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2142 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
2143 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0);
2144 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
2145 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2146 
2147 	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
2148 	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
2149 	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0);
2150 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2151 
2152 	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2153 	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
2154 	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0);
2155 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2156 
2157 	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2158 	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
2159 	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0);
2160 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2161 
2162 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2163 	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
2164 	/* XXX this only needs to be programmed once per crtc at startup,
2165 	 * not sure where the best place for it is
2166 	 */
2167 	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
2168 	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
2169 	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2170 }
2171 
2172 static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
2173 {
2174 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2175 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2176 
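	/* each UNIPHY block has two links (A and B): link A of UNIPHYn
	 * maps to DIG(2n), link B to DIG(2n + 1); UNIPHY3 only exposes
	 * DIG6
	 */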
2177 	switch (amdgpu_encoder->encoder_id) {
2178 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2179 		if (dig->linkb)
2180 			return 1;
2181 		else
2182 			return 0;
2184 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2185 		if (dig->linkb)
2186 			return 3;
2187 		else
2188 			return 2;
2190 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2191 		if (dig->linkb)
2192 			return 5;
2193 		else
2194 			return 4;
2196 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2197 		return 6;
2199 	default:
2200 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2201 		return 0;
2202 	}
2203 }
2204 
2205 /**
2206  * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc.
2207  *
2208  * @crtc: drm crtc
2209  *
2210  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2211  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2212  * monitors a dedicated PPLL must be used.  If a particular board has
2213  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2214  * as there is no need to program the PLL itself.  If we are not able to
2215  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2216  * avoid messing up an existing monitor.
2217  *
2218  * Asic specific PLL information
2219  *
2220  * DCE 10.x
2221  * Tonga
2222  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2223  * CI
2224  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2225  *
2226  */
2227 static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
2228 {
2229 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2230 	struct drm_device *dev = crtc->dev;
2231 	struct amdgpu_device *adev = dev->dev_private;
2232 	u32 pll_in_use;
2233 	int pll;
2234 
2235 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2236 		if (adev->clock.dp_extclk)
2237 			/* skip PPLL programming if using ext clock */
2238 			return ATOM_PPLL_INVALID;
2239 		else {
2240 			/* use the same PPLL for all DP monitors */
2241 			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2242 			if (pll != ATOM_PPLL_INVALID)
2243 				return pll;
2244 		}
2245 	} else {
2246 		/* use the same PPLL for all monitors with the same clock */
2247 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2248 		if (pll != ATOM_PPLL_INVALID)
2249 			return pll;
2250 	}
2251 
2252 	/* DCE10 has PPLL0, PPLL1, and PPLL2 */
2253 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2254 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2255 		return ATOM_PPLL2;
2256 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2257 		return ATOM_PPLL1;
2258 	if (!(pll_in_use & (1 << ATOM_PPLL0)))
2259 		return ATOM_PPLL0;
2260 	DRM_ERROR("unable to allocate a PPLL\n");
2261 	return ATOM_PPLL_INVALID;
2262 }
2263 
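/* hold (or release) pending cursor register updates, so that a series of
 * position/hotspot/size writes latches together rather than mid-update
 */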
2264 static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2265 {
2266 	struct amdgpu_device *adev = crtc->dev->dev_private;
2267 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2268 	uint32_t cur_lock;
2269 
2270 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2271 	if (lock)
2272 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2273 	else
2274 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2275 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2276 }
2277 
2278 static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
2279 {
2280 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2281 	struct amdgpu_device *adev = crtc->dev->dev_private;
2282 	u32 tmp;
2283 
2284 	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2285 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2286 	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2287 }
2288 
2289 static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
2290 {
2291 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2292 	struct amdgpu_device *adev = crtc->dev->dev_private;
2293 	u32 tmp;
2294 
2295 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2296 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2297 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2298 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2299 
2300 	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2301 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2302 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2303 	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2304 }
2305 
2306 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
2307 					int x, int y)
2308 {
2309 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2310 	struct amdgpu_device *adev = crtc->dev->dev_private;
2311 	int xorigin = 0, yorigin = 0;
2312 
2313 	amdgpu_crtc->cursor_x = x;
2314 	amdgpu_crtc->cursor_y = y;
2315 
	/* avivo cursors are offset into the total surface */
2317 	x += crtc->x;
2318 	y += crtc->y;
2319 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2320 
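	/* the hw position can't go negative; clamp to 0 and compensate
	 * by shifting the hotspot instead
	 */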
2321 	if (x < 0) {
2322 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2323 		x = 0;
2324 	}
2325 	if (y < 0) {
2326 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2327 		y = 0;
2328 	}
2329 
2330 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2331 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2332 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2333 	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2334 
2335 	return 0;
2336 }
2337 
2338 static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2339 				      int x, int y)
2340 {
2341 	int ret;
2342 
2343 	dce_v10_0_lock_cursor(crtc, true);
2344 	ret = dce_v10_0_cursor_move_locked(crtc, x, y);
2345 	dce_v10_0_lock_cursor(crtc, false);
2346 
2347 	return ret;
2348 }
2349 
2350 static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2351 				      struct drm_file *file_priv,
2352 				      uint32_t handle,
2353 				      uint32_t width,
2354 				      uint32_t height,
2355 				      int32_t hot_x,
2356 				      int32_t hot_y)
2357 {
2358 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2359 	struct drm_gem_object *obj;
2360 	struct amdgpu_bo *aobj;
2361 	int ret;
2362 
2363 	if (!handle) {
2364 		/* turn off cursor */
2365 		dce_v10_0_hide_cursor(crtc);
2366 		obj = NULL;
2367 		goto unpin;
2368 	}
2369 
2370 	if ((width > amdgpu_crtc->max_cursor_width) ||
2371 	    (height > amdgpu_crtc->max_cursor_height)) {
2372 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2373 		return -EINVAL;
2374 	}
2375 
2376 	obj = drm_gem_object_lookup(file_priv, handle);
2377 	if (!obj) {
2378 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2379 		return -ENOENT;
2380 	}
2381 
2382 	aobj = gem_to_amdgpu_bo(obj);
2383 	ret = amdgpu_bo_reserve(aobj, false);
2384 	if (ret != 0) {
2385 		drm_gem_object_put_unlocked(obj);
2386 		return ret;
2387 	}
2388 
2389 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2390 	amdgpu_bo_unreserve(aobj);
2391 	if (ret) {
2392 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2393 		drm_gem_object_put_unlocked(obj);
2394 		return ret;
2395 	}
2396 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2397 
2398 	dce_v10_0_lock_cursor(crtc, true);
2399 
2400 	if (width != amdgpu_crtc->cursor_width ||
2401 	    height != amdgpu_crtc->cursor_height ||
2402 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2403 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2404 		int x, y;
2405 
2406 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2407 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2408 
2409 		dce_v10_0_cursor_move_locked(crtc, x, y);
2410 
2411 		amdgpu_crtc->cursor_width = width;
2412 		amdgpu_crtc->cursor_height = height;
2413 		amdgpu_crtc->cursor_hot_x = hot_x;
2414 		amdgpu_crtc->cursor_hot_y = hot_y;
2415 	}
2416 
2417 	dce_v10_0_show_cursor(crtc);
2418 	dce_v10_0_lock_cursor(crtc, false);
2419 
2420 unpin:
2421 	if (amdgpu_crtc->cursor_bo) {
2422 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2423 		ret = amdgpu_bo_reserve(aobj, true);
2424 		if (likely(ret == 0)) {
2425 			amdgpu_bo_unpin(aobj);
2426 			amdgpu_bo_unreserve(aobj);
2427 		}
2428 		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
2429 	}
2430 
2431 	amdgpu_crtc->cursor_bo = obj;
2432 	return 0;
2433 }
2434 
2435 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2436 {
2437 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2438 
2439 	if (amdgpu_crtc->cursor_bo) {
2440 		dce_v10_0_lock_cursor(crtc, true);
2441 
2442 		dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2443 					     amdgpu_crtc->cursor_y);
2444 
2445 		dce_v10_0_show_cursor(crtc);
2446 
2447 		dce_v10_0_lock_cursor(crtc, false);
2448 	}
2449 }
2450 
2451 static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2452 				    u16 *blue, uint32_t size,
2453 				    struct drm_modeset_acquire_ctx *ctx)
2454 {
2455 	dce_v10_0_crtc_load_lut(crtc);
2456 
2457 	return 0;
2458 }
2459 
2460 static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
2461 {
2462 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2463 
2464 	drm_crtc_cleanup(crtc);
2465 	kfree(amdgpu_crtc);
2466 }
2467 
2468 static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
2469 	.cursor_set2 = dce_v10_0_crtc_cursor_set2,
2470 	.cursor_move = dce_v10_0_crtc_cursor_move,
2471 	.gamma_set = dce_v10_0_crtc_gamma_set,
2472 	.set_config = amdgpu_display_crtc_set_config,
2473 	.destroy = dce_v10_0_crtc_destroy,
2474 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2475 };
2476 
2477 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2478 {
2479 	struct drm_device *dev = crtc->dev;
2480 	struct amdgpu_device *adev = dev->dev_private;
2481 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2482 	unsigned type;
2483 
2484 	switch (mode) {
2485 	case DRM_MODE_DPMS_ON:
2486 		amdgpu_crtc->enabled = true;
2487 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2488 		dce_v10_0_vga_enable(crtc, true);
2489 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2490 		dce_v10_0_vga_enable(crtc, false);
2491 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2492 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2493 						amdgpu_crtc->crtc_id);
2494 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2495 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2496 		drm_crtc_vblank_on(crtc);
2497 		dce_v10_0_crtc_load_lut(crtc);
2498 		break;
2499 	case DRM_MODE_DPMS_STANDBY:
2500 	case DRM_MODE_DPMS_SUSPEND:
2501 	case DRM_MODE_DPMS_OFF:
2502 		drm_crtc_vblank_off(crtc);
2503 		if (amdgpu_crtc->enabled) {
2504 			dce_v10_0_vga_enable(crtc, true);
2505 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2506 			dce_v10_0_vga_enable(crtc, false);
2507 		}
2508 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2509 		amdgpu_crtc->enabled = false;
2510 		break;
2511 	}
2512 	/* adjust pm to dpms */
2513 	amdgpu_pm_compute_clocks(adev);
2514 }
2515 
2516 static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
2517 {
2518 	/* disable crtc pair power gating before programming */
2519 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2520 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2521 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2522 }
2523 
2524 static void dce_v10_0_crtc_commit(struct drm_crtc *crtc)
2525 {
2526 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2527 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2528 }
2529 
2530 static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
2531 {
2532 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2533 	struct drm_device *dev = crtc->dev;
2534 	struct amdgpu_device *adev = dev->dev_private;
2535 	struct amdgpu_atom_ss ss;
2536 	int i;
2537 
2538 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2539 	if (crtc->primary->fb) {
2540 		int r;
2541 		struct amdgpu_bo *abo;
2542 
2543 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2544 		r = amdgpu_bo_reserve(abo, true);
2545 		if (unlikely(r))
2546 			DRM_ERROR("failed to reserve abo before unpin\n");
2547 		else {
2548 			amdgpu_bo_unpin(abo);
2549 			amdgpu_bo_unreserve(abo);
2550 		}
2551 	}
2552 	/* disable the GRPH */
2553 	dce_v10_0_grph_enable(crtc, false);
2554 
2555 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2556 
2557 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2558 		if (adev->mode_info.crtcs[i] &&
2559 		    adev->mode_info.crtcs[i]->enabled &&
2560 		    i != amdgpu_crtc->crtc_id &&
2561 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* another crtc is using this pll, don't turn
			 * it off
			 */
2565 			goto done;
2566 		}
2567 	}
2568 
2569 	switch (amdgpu_crtc->pll_id) {
2570 	case ATOM_PPLL0:
2571 	case ATOM_PPLL1:
2572 	case ATOM_PPLL2:
2573 		/* disable the ppll */
2574 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2575 					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2576 		break;
2577 	default:
2578 		break;
2579 	}
2580 done:
2581 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2582 	amdgpu_crtc->adjusted_clock = 0;
2583 	amdgpu_crtc->encoder = NULL;
2584 	amdgpu_crtc->connector = NULL;
2585 }
2586 
2587 static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
2588 				  struct drm_display_mode *mode,
2589 				  struct drm_display_mode *adjusted_mode,
2590 				  int x, int y, struct drm_framebuffer *old_fb)
2591 {
2592 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2593 
2594 	if (!amdgpu_crtc->adjusted_clock)
2595 		return -EINVAL;
2596 
2597 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2598 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2599 	dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2600 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2601 	amdgpu_atombios_crtc_scaler_setup(crtc);
2602 	dce_v10_0_cursor_reset(crtc);
	/* update the hw mode for dpm */
2604 	amdgpu_crtc->hw_mode = *adjusted_mode;
2605 
2606 	return 0;
2607 }
2608 
2609 static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
2610 				     const struct drm_display_mode *mode,
2611 				     struct drm_display_mode *adjusted_mode)
2612 {
2613 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2614 	struct drm_device *dev = crtc->dev;
2615 	struct drm_encoder *encoder;
2616 
2617 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2618 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2619 		if (encoder->crtc == crtc) {
2620 			amdgpu_crtc->encoder = encoder;
2621 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2622 			break;
2623 		}
2624 	}
2625 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2626 		amdgpu_crtc->encoder = NULL;
2627 		amdgpu_crtc->connector = NULL;
2628 		return false;
2629 	}
2630 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2631 		return false;
2632 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2633 		return false;
2634 	/* pick pll */
2635 	amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc);
2636 	/* if we can't get a PPLL for a non-DP encoder, fail */
2637 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2638 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2639 		return false;
2640 
2641 	return true;
2642 }
2643 
2644 static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2645 				  struct drm_framebuffer *old_fb)
2646 {
2647 	return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2648 }
2649 
2650 static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2651 					 struct drm_framebuffer *fb,
2652 					 int x, int y, enum mode_set_atomic state)
2653 {
	return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
2655 }
2656 
2657 static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
2658 	.dpms = dce_v10_0_crtc_dpms,
2659 	.mode_fixup = dce_v10_0_crtc_mode_fixup,
2660 	.mode_set = dce_v10_0_crtc_mode_set,
2661 	.mode_set_base = dce_v10_0_crtc_set_base,
2662 	.mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
2663 	.prepare = dce_v10_0_crtc_prepare,
2664 	.commit = dce_v10_0_crtc_commit,
2665 	.disable = dce_v10_0_crtc_disable,
2666 };
2667 
2668 static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
2669 {
2670 	struct amdgpu_crtc *amdgpu_crtc;
2671 
2672 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2673 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2674 	if (amdgpu_crtc == NULL)
2675 		return -ENOMEM;
2676 
2677 	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
2678 
2679 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2680 	amdgpu_crtc->crtc_id = index;
2681 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2682 
2683 	amdgpu_crtc->max_cursor_width = 128;
2684 	amdgpu_crtc->max_cursor_height = 128;
2685 	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2686 	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2687 
2688 	switch (amdgpu_crtc->crtc_id) {
2689 	case 0:
2690 	default:
2691 		amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2692 		break;
2693 	case 1:
2694 		amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2695 		break;
2696 	case 2:
2697 		amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2698 		break;
2699 	case 3:
2700 		amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2701 		break;
2702 	case 4:
2703 		amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2704 		break;
2705 	case 5:
2706 		amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2707 		break;
2708 	}
2709 
2710 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2711 	amdgpu_crtc->adjusted_clock = 0;
2712 	amdgpu_crtc->encoder = NULL;
2713 	amdgpu_crtc->connector = NULL;
2714 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
2715 
2716 	return 0;
2717 }
2718 
2719 static int dce_v10_0_early_init(void *handle)
2720 {
2721 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2722 
2723 	adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
2724 	adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
2725 
2726 	dce_v10_0_set_display_funcs(adev);
2727 
2728 	adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
2729 
2730 	switch (adev->asic_type) {
2731 	case CHIP_FIJI:
2732 	case CHIP_TONGA:
2733 		adev->mode_info.num_hpd = 6;
2734 		adev->mode_info.num_dig = 7;
2735 		break;
2736 	default:
2737 		/* FIXME: not supported yet */
2738 		return -EINVAL;
2739 	}
2740 
2741 	dce_v10_0_set_irq_funcs(adev);
2742 
2743 	return 0;
2744 }
2745 
2746 static int dce_v10_0_sw_init(void *handle)
2747 {
2748 	int r, i;
2749 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2750 
2751 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2752 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2753 		if (r)
2754 			return r;
2755 	}
2756 
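	/* pageflip interrupt sources: one per crtc (D1-D6), every other
	 * src_id starting at VISLANDS30_IV_SRCID_D1_GRPH_PFLIP
	 */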
2757 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2758 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2759 		if (r)
2760 			return r;
2761 	}
2762 
2763 	/* HPD hotplug */
2764 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2765 	if (r)
2766 		return r;
2767 
2768 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2769 
2770 	adev->ddev->mode_config.async_page_flip = true;
2771 
2772 	adev->ddev->mode_config.max_width = 16384;
2773 	adev->ddev->mode_config.max_height = 16384;
2774 
2775 	adev->ddev->mode_config.preferred_depth = 24;
2776 	adev->ddev->mode_config.prefer_shadow = 1;
2777 
2778 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2779 
2780 	r = amdgpu_display_modeset_create_props(adev);
2781 	if (r)
2782 		return r;
2786 
2787 	/* allocate crtcs */
2788 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2789 		r = dce_v10_0_crtc_init(adev, i);
2790 		if (r)
2791 			return r;
2792 	}
2793 
2794 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2795 		amdgpu_display_print_display_setup(adev->ddev);
2796 	else
2797 		return -EINVAL;
2798 
2799 	/* setup afmt */
2800 	r = dce_v10_0_afmt_init(adev);
2801 	if (r)
2802 		return r;
2803 
2804 	r = dce_v10_0_audio_init(adev);
2805 	if (r)
2806 		return r;
2807 
2808 	drm_kms_helper_poll_init(adev->ddev);
2809 
2810 	adev->mode_info.mode_config_initialized = true;
2811 	return 0;
2812 }
2813 
2814 static int dce_v10_0_sw_fini(void *handle)
2815 {
2816 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2817 
2818 	kfree(adev->mode_info.bios_hardcoded_edid);
2819 
2820 	drm_kms_helper_poll_fini(adev->ddev);
2821 
2822 	dce_v10_0_audio_fini(adev);
2823 
2824 	dce_v10_0_afmt_fini(adev);
2825 
2826 	drm_mode_config_cleanup(adev->ddev);
2827 	adev->mode_info.mode_config_initialized = false;
2828 
2829 	return 0;
2830 }
2831 
2832 static int dce_v10_0_hw_init(void *handle)
2833 {
2834 	int i;
2835 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2836 
2837 	dce_v10_0_init_golden_registers(adev);
2838 
2839 	/* disable vga render */
2840 	dce_v10_0_set_vga_render_state(adev, false);
2841 	/* init dig PHYs, disp eng pll */
2842 	amdgpu_atombios_encoder_init_dig(adev);
2843 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2844 
2845 	/* initialize hpd */
2846 	dce_v10_0_hpd_init(adev);
2847 
	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2851 
2852 	dce_v10_0_pageflip_interrupt_init(adev);
2853 
2854 	return 0;
2855 }
2856 
2857 static int dce_v10_0_hw_fini(void *handle)
2858 {
2859 	int i;
2860 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2861 
2862 	dce_v10_0_hpd_fini(adev);
2863 
	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2867 
2868 	dce_v10_0_pageflip_interrupt_fini(adev);
2869 
2870 	return 0;
2871 }
2872 
2873 static int dce_v10_0_suspend(void *handle)
2874 {
2875 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2876 
2877 	adev->mode_info.bl_level =
2878 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2879 
2880 	return dce_v10_0_hw_fini(handle);
2881 }
2882 
2883 static int dce_v10_0_resume(void *handle)
2884 {
2885 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2886 	int ret;
2887 
2888 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2889 							   adev->mode_info.bl_level);
2890 
2891 	ret = dce_v10_0_hw_init(handle);
2892 
2893 	/* turn on the BL */
2894 	if (adev->mode_info.bl_encoder) {
2895 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2896 								  adev->mode_info.bl_encoder);
2897 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2898 						    bl_level);
2899 	}
2900 
2901 	return ret;
2902 }
2903 
2904 static bool dce_v10_0_is_idle(void *handle)
2905 {
2906 	return true;
2907 }
2908 
2909 static int dce_v10_0_wait_for_idle(void *handle)
2910 {
2911 	return 0;
2912 }
2913 
2914 static bool dce_v10_0_check_soft_reset(void *handle)
2915 {
2916 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2917 
2918 	return dce_v10_0_is_display_hung(adev);
2919 }
2920 
2921 static int dce_v10_0_soft_reset(void *handle)
2922 {
2923 	u32 srbm_soft_reset = 0, tmp;
2924 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2925 
2926 	if (dce_v10_0_is_display_hung(adev))
2927 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2928 
2929 	if (srbm_soft_reset) {
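		/* assert the DC reset, post the write with a readback,
		 * wait, then deassert
		 */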
2930 		tmp = RREG32(mmSRBM_SOFT_RESET);
2931 		tmp |= srbm_soft_reset;
2932 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2933 		WREG32(mmSRBM_SOFT_RESET, tmp);
2934 		tmp = RREG32(mmSRBM_SOFT_RESET);
2935 
2936 		udelay(50);
2937 
2938 		tmp &= ~srbm_soft_reset;
2939 		WREG32(mmSRBM_SOFT_RESET, tmp);
2940 		tmp = RREG32(mmSRBM_SOFT_RESET);
2941 
2942 		/* Wait a little for things to settle down */
2943 		udelay(50);
2944 	}
2945 	return 0;
2946 }
2947 
2948 static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2949 						     int crtc,
2950 						     enum amdgpu_interrupt_state state)
2951 {
2952 	u32 lb_interrupt_mask;
2953 
2954 	if (crtc >= adev->mode_info.num_crtc) {
2955 		DRM_DEBUG("invalid crtc %d\n", crtc);
2956 		return;
2957 	}
2958 
2959 	switch (state) {
2960 	case AMDGPU_IRQ_STATE_DISABLE:
2961 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2962 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2963 						  VBLANK_INTERRUPT_MASK, 0);
2964 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2965 		break;
2966 	case AMDGPU_IRQ_STATE_ENABLE:
2967 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2968 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2969 						  VBLANK_INTERRUPT_MASK, 1);
2970 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2971 		break;
2972 	default:
2973 		break;
2974 	}
2975 }
2976 
2977 static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2978 						    int crtc,
2979 						    enum amdgpu_interrupt_state state)
2980 {
2981 	u32 lb_interrupt_mask;
2982 
2983 	if (crtc >= adev->mode_info.num_crtc) {
2984 		DRM_DEBUG("invalid crtc %d\n", crtc);
2985 		return;
2986 	}
2987 
2988 	switch (state) {
2989 	case AMDGPU_IRQ_STATE_DISABLE:
2990 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2991 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2992 						  VLINE_INTERRUPT_MASK, 0);
2993 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2994 		break;
2995 	case AMDGPU_IRQ_STATE_ENABLE:
2996 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2997 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2998 						  VLINE_INTERRUPT_MASK, 1);
2999 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3000 		break;
3001 	default:
3002 		break;
3003 	}
3004 }
3005 
3006 static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
3007 				       struct amdgpu_irq_src *source,
3008 				       unsigned hpd,
3009 				       enum amdgpu_interrupt_state state)
3010 {
3011 	u32 tmp;
3012 
3013 	if (hpd >= adev->mode_info.num_hpd) {
3014 		DRM_DEBUG("invalid hdp %d\n", hpd);
3015 		return 0;
3016 	}
3017 
3018 	switch (state) {
3019 	case AMDGPU_IRQ_STATE_DISABLE:
3020 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3021 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3022 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3023 		break;
3024 	case AMDGPU_IRQ_STATE_ENABLE:
3025 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3026 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3027 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3028 		break;
3029 	default:
3030 		break;
3031 	}
3032 
3033 	return 0;
3034 }
3035 
3036 static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev,
3037 					struct amdgpu_irq_src *source,
3038 					unsigned type,
3039 					enum amdgpu_interrupt_state state)
3040 {
3041 	switch (type) {
3042 	case AMDGPU_CRTC_IRQ_VBLANK1:
3043 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3044 		break;
3045 	case AMDGPU_CRTC_IRQ_VBLANK2:
3046 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3047 		break;
3048 	case AMDGPU_CRTC_IRQ_VBLANK3:
3049 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3050 		break;
3051 	case AMDGPU_CRTC_IRQ_VBLANK4:
3052 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3053 		break;
3054 	case AMDGPU_CRTC_IRQ_VBLANK5:
3055 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3056 		break;
3057 	case AMDGPU_CRTC_IRQ_VBLANK6:
3058 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3059 		break;
3060 	case AMDGPU_CRTC_IRQ_VLINE1:
3061 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state);
3062 		break;
3063 	case AMDGPU_CRTC_IRQ_VLINE2:
3064 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state);
3065 		break;
3066 	case AMDGPU_CRTC_IRQ_VLINE3:
3067 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state);
3068 		break;
3069 	case AMDGPU_CRTC_IRQ_VLINE4:
3070 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state);
3071 		break;
3072 	case AMDGPU_CRTC_IRQ_VLINE5:
3073 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state);
3074 		break;
3075 	case AMDGPU_CRTC_IRQ_VLINE6:
3076 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state);
3077 		break;
3078 	default:
3079 		break;
3080 	}
3081 	return 0;
3082 }
3083 
3084 static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3085 					    struct amdgpu_irq_src *src,
3086 					    unsigned type,
3087 					    enum amdgpu_interrupt_state state)
3088 {
3089 	u32 reg;
3090 
3091 	if (type >= adev->mode_info.num_crtc) {
3092 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3093 		return -EINVAL;
3094 	}
3095 
3096 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3097 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3098 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3099 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3100 	else
3101 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3102 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3103 
3104 	return 0;
3105 }
3106 
3107 static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3108 				  struct amdgpu_irq_src *source,
3109 				  struct amdgpu_iv_entry *entry)
3110 {
3111 	unsigned long flags;
3112 	unsigned crtc_id;
3113 	struct amdgpu_crtc *amdgpu_crtc;
3114 	struct amdgpu_flip_work *works;
3115 
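	/* GRPH_PFLIP src_ids start at 8 (D1) and are spaced two apart
	 * (see the registration loop in dce_v10_0_sw_init()), so map
	 * back to a 0-based crtc index
	 */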
	crtc_id = (entry->src_id - 8) >> 1;

	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3123 
3124 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3125 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3126 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3127 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3128 
	/* the IRQ can fire during early bring-up, before the crtc is set up */
3130 	if (amdgpu_crtc == NULL)
3131 		return 0;
3132 
3133 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3134 	works = amdgpu_crtc->pflip_works;
3135 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
3140 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3141 		return 0;
3142 	}
3143 
3144 	/* page flip completed. clean up */
3145 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3146 	amdgpu_crtc->pflip_works = NULL;
3147 
	/* wake up userspace */
3149 	if (works->event)
3150 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3151 
3152 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3153 
3154 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3155 	schedule_work(&works->unpin_work);
3156 
3157 	return 0;
3158 }
3159 
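/**
 * dce_v10_0_hpd_int_ack - ack a hpd interrupt
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd pin to ack
 *
 * Sets the ack bit in DC_HPD_INT_CONTROL for the requested pin.
 */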
3160 static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
3161 				  int hpd)
3162 {
3163 	u32 tmp;
3164 
3165 	if (hpd >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", hpd);
3167 		return;
3168 	}
3169 
3170 	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3171 	tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3172 	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3173 }
3174 
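/**
 * dce_v10_0_crtc_vblank_int_ack - ack a crtc vblank interrupt
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to ack
 *
 * Sets the ack bit in LB_VBLANK_STATUS for the requested crtc.
 */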
3175 static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3176 					  int crtc)
3177 {
3178 	u32 tmp;
3179 
3180 	if (crtc >= adev->mode_info.num_crtc) {
3181 		DRM_DEBUG("invalid crtc %d\n", crtc);
3182 		return;
3183 	}
3184 
3185 	tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3186 	tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3187 	WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3188 }
3189 
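/**
 * dce_v10_0_crtc_vline_int_ack - ack a crtc vline interrupt
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to ack
 *
 * Sets the ack bit in LB_VLINE_STATUS for the requested crtc.
 */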
3190 static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3191 					 int crtc)
3192 {
3193 	u32 tmp;
3194 
3195 	if (crtc >= adev->mode_info.num_crtc) {
3196 		DRM_DEBUG("invalid crtc %d\n", crtc);
3197 		return;
3198 	}
3199 
3200 	tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3201 	tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3202 	WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3203 }
3204 
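/**
 * dce_v10_0_crtc_irq - crtc vblank/vline interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Acks the vblank or vline interrupt reported in @entry and, for
 * vblank, forwards the event to the DRM core.
 */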
3205 static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3206 			      struct amdgpu_irq_src *source,
3207 			      struct amdgpu_iv_entry *entry)
3208 {
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int;
	unsigned int irq_type;

	/* validate the crtc id from the IV entry before it is used to
	 * index the per-crtc interrupt status table
	 */
	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return 0;
	}

	disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
3212 
3213 	switch (entry->src_data[0]) {
3214 	case 0: /* vblank */
3215 		if (disp_int & interrupt_status_offsets[crtc].vblank)
3216 			dce_v10_0_crtc_vblank_int_ack(adev, crtc);
3217 		else
3218 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3219 
		if (amdgpu_irq_enabled(adev, source, irq_type))
			drm_handle_vblank(adev->ddev, crtc);
3223 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3224 
3225 		break;
3226 	case 1: /* vline */
3227 		if (disp_int & interrupt_status_offsets[crtc].vline)
3228 			dce_v10_0_crtc_vline_int_ack(adev, crtc);
3229 		else
3230 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3231 
3232 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3233 
3234 		break;
3235 	default:
3236 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3237 		break;
3238 	}
3239 
3240 	return 0;
3241 }
3242 
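/**
 * dce_v10_0_hpd_irq - hpd interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Acks the hpd interrupt and schedules the hotplug work to
 * re-probe the affected connectors.
 */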
3243 static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
3244 			     struct amdgpu_irq_src *source,
3245 			     struct amdgpu_iv_entry *entry)
3246 {
3247 	uint32_t disp_int, mask;
3248 	unsigned hpd;
3249 
3250 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3251 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3252 		return 0;
3253 	}
3254 
3255 	hpd = entry->src_data[0];
3256 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3257 	mask = interrupt_status_offsets[hpd].hpd;
3258 
3259 	if (disp_int & mask) {
3260 		dce_v10_0_hpd_int_ack(adev, hpd);
3261 		schedule_work(&adev->hotplug_work);
3262 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3263 	}
3264 
3265 	return 0;
3266 }
3267 
3268 static int dce_v10_0_set_clockgating_state(void *handle,
3269 					  enum amd_clockgating_state state)
3270 {
3271 	return 0;
3272 }
3273 
3274 static int dce_v10_0_set_powergating_state(void *handle,
3275 					  enum amd_powergating_state state)
3276 {
3277 	return 0;
3278 }
3279 
3280 static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
3281 	.name = "dce_v10_0",
3282 	.early_init = dce_v10_0_early_init,
3283 	.late_init = NULL,
3284 	.sw_init = dce_v10_0_sw_init,
3285 	.sw_fini = dce_v10_0_sw_fini,
3286 	.hw_init = dce_v10_0_hw_init,
3287 	.hw_fini = dce_v10_0_hw_fini,
3288 	.suspend = dce_v10_0_suspend,
3289 	.resume = dce_v10_0_resume,
3290 	.is_idle = dce_v10_0_is_idle,
3291 	.wait_for_idle = dce_v10_0_wait_for_idle,
3292 	.check_soft_reset = dce_v10_0_check_soft_reset,
3293 	.soft_reset = dce_v10_0_soft_reset,
3294 	.set_clockgating_state = dce_v10_0_set_clockgating_state,
3295 	.set_powergating_state = dce_v10_0_set_powergating_state,
3296 };
3297 
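/**
 * dce_v10_0_encoder_mode_set - encoder mode_set callback
 *
 * @encoder: drm encoder
 * @mode: requested mode
 * @adjusted_mode: mode after the fixup pass
 *
 * Caches the pixel clock, powers the encoder down for setup,
 * restores the interleave setting and enables/programs AFMT for
 * HDMI encoder modes.
 */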
3298 static void
3299 dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
3300 			  struct drm_display_mode *mode,
3301 			  struct drm_display_mode *adjusted_mode)
3302 {
3303 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3304 
3305 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3306 
3307 	/* need to call this here rather than in prepare() since we need some crtc info */
3308 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3309 
	/* programming the scaler clears interleave on some chips; set it here */
3311 	dce_v10_0_set_interleave(encoder->crtc, mode);
3312 
3313 	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3314 		dce_v10_0_afmt_enable(encoder, true);
3315 		dce_v10_0_afmt_setmode(encoder, adjusted_mode);
3316 	}
3317 }
3318 
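/**
 * dce_v10_0_encoder_prepare - encoder prepare callback
 *
 * @encoder: drm encoder
 *
 * Assigns a DIG encoder (and an AFMT block for DFP) to digital
 * encoders, selects the i2c router port, powers up eDP panels,
 * sets the crtc source and programs the FMT blocks ahead of the
 * mode set.
 */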
3319 static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
3320 {
3321 	struct amdgpu_device *adev = encoder->dev->dev_private;
3322 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3323 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3324 
3325 	if ((amdgpu_encoder->active_device &
3326 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3327 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3328 	     ENCODER_OBJECT_ID_NONE)) {
3329 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3330 		if (dig) {
3331 			dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
3332 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3333 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3334 		}
3335 	}
3336 
3337 	amdgpu_atombios_scratch_regs_lock(adev, true);
3338 
3339 	if (connector) {
3340 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3341 
3342 		/* select the clock/data port if it uses a router */
3343 		if (amdgpu_connector->router.cd_valid)
3344 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3345 
3346 		/* turn eDP panel on for mode set */
3347 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3348 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3349 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3350 	}
3351 
3352 	/* this is needed for the pll/ss setup to work correctly in some cases */
3353 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3354 	/* set up the FMT blocks */
3355 	dce_v10_0_program_fmt(encoder);
3356 }
3357 
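/**
 * dce_v10_0_encoder_commit - encoder commit callback
 *
 * @encoder: drm encoder
 *
 * Powers the encoder back on once the crtc is set up and releases
 * the atombios scratch register lock taken in prepare().
 */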
3358 static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
3359 {
3360 	struct drm_device *dev = encoder->dev;
3361 	struct amdgpu_device *adev = dev->dev_private;
3362 
3363 	/* need to call this here as we need the crtc set up */
3364 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3365 	amdgpu_atombios_scratch_regs_lock(adev, false);
3366 }
3367 
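/**
 * dce_v10_0_encoder_disable - encoder disable callback
 *
 * @encoder: drm encoder
 *
 * Powers the encoder down, disables AFMT for HDMI and releases
 * the DIG encoder assignment.
 */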
3368 static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
3369 {
3370 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3371 	struct amdgpu_encoder_atom_dig *dig;
3372 
3373 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3374 
3375 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3376 		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3377 			dce_v10_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		/* enc_priv can be NULL if the dig info lookup failed */
		if (dig)
			dig->dig_encoder = -1;
	}
3381 	amdgpu_encoder->active_device = 0;
3382 }
3383 
/* these are handled by the primary encoders */
static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
{
}

static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
{
}

static void
dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
}

static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
{
}

static void
dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}
3413 
3414 static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
3415 	.dpms = dce_v10_0_ext_dpms,
3416 	.prepare = dce_v10_0_ext_prepare,
3417 	.mode_set = dce_v10_0_ext_mode_set,
3418 	.commit = dce_v10_0_ext_commit,
3419 	.disable = dce_v10_0_ext_disable,
3420 	/* no detect for TMDS/LVDS yet */
3421 };
3422 
3423 static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
3424 	.dpms = amdgpu_atombios_encoder_dpms,
3425 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3426 	.prepare = dce_v10_0_encoder_prepare,
3427 	.mode_set = dce_v10_0_encoder_mode_set,
3428 	.commit = dce_v10_0_encoder_commit,
3429 	.disable = dce_v10_0_encoder_disable,
3430 	.detect = amdgpu_atombios_encoder_dig_detect,
3431 };
3432 
3433 static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
3434 	.dpms = amdgpu_atombios_encoder_dpms,
3435 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3436 	.prepare = dce_v10_0_encoder_prepare,
3437 	.mode_set = dce_v10_0_encoder_mode_set,
3438 	.commit = dce_v10_0_encoder_commit,
3439 	.detect = amdgpu_atombios_encoder_dac_detect,
3440 };
3441 
3442 static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
3443 {
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3446 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3447 	kfree(amdgpu_encoder->enc_priv);
3448 	drm_encoder_cleanup(encoder);
3449 	kfree(amdgpu_encoder);
3450 }
3451 
3452 static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
3453 	.destroy = dce_v10_0_encoder_destroy,
3454 };
3455 
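/**
 * dce_v10_0_encoder_add - add an encoder from the bios object table
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the bios
 * @supported_device: ATOM_DEVICE_* bitmask the encoder drives
 * @caps: encoder capability flags
 *
 * Registers a drm_encoder for the object if one does not exist
 * yet, derives possible_crtcs from the crtc count and hooks up
 * the DAC/DIG/external helper funcs based on the encoder id.
 */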
3456 static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3457 				 uint32_t encoder_enum,
3458 				 uint32_t supported_device,
3459 				 u16 caps)
3460 {
3461 	struct drm_device *dev = adev->ddev;
3462 	struct drm_encoder *encoder;
3463 	struct amdgpu_encoder *amdgpu_encoder;
3464 
3465 	/* see if we already added it */
3466 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3467 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3468 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3469 			amdgpu_encoder->devices |= supported_device;
3470 			return;
3471 		}
3472 
	}
3475 	/* add a new one */
3476 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3477 	if (!amdgpu_encoder)
3478 		return;
3479 
3480 	encoder = &amdgpu_encoder->base;
3481 	switch (adev->mode_info.num_crtc) {
3482 	case 1:
3483 		encoder->possible_crtcs = 0x1;
3484 		break;
3485 	case 2:
3486 	default:
3487 		encoder->possible_crtcs = 0x3;
3488 		break;
3489 	case 4:
3490 		encoder->possible_crtcs = 0xf;
3491 		break;
3492 	case 6:
3493 		encoder->possible_crtcs = 0x3f;
3494 		break;
3495 	}
3496 
3497 	amdgpu_encoder->enc_priv = NULL;
3498 
3499 	amdgpu_encoder->encoder_enum = encoder_enum;
3500 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3501 	amdgpu_encoder->devices = supported_device;
3502 	amdgpu_encoder->rmx_type = RMX_OFF;
3503 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3504 	amdgpu_encoder->is_ext_encoder = false;
3505 	amdgpu_encoder->caps = caps;
3506 
3507 	switch (amdgpu_encoder->encoder_id) {
3508 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3509 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3510 		drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3511 				 DRM_MODE_ENCODER_DAC, NULL);
3512 		drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
3513 		break;
3514 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3515 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3516 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3517 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3518 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3519 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3520 			amdgpu_encoder->rmx_type = RMX_FULL;
3521 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3522 					 DRM_MODE_ENCODER_LVDS, NULL);
3523 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3524 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3525 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3526 					 DRM_MODE_ENCODER_DAC, NULL);
3527 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3528 		} else {
3529 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3530 					 DRM_MODE_ENCODER_TMDS, NULL);
3531 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3532 		}
3533 		drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
3534 		break;
3535 	case ENCODER_OBJECT_ID_SI170B:
3536 	case ENCODER_OBJECT_ID_CH7303:
3537 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3538 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3539 	case ENCODER_OBJECT_ID_TITFP513:
3540 	case ENCODER_OBJECT_ID_VT1623:
3541 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3542 	case ENCODER_OBJECT_ID_TRAVIS:
3543 	case ENCODER_OBJECT_ID_NUTMEG:
3544 		/* these are handled by the primary encoders */
3545 		amdgpu_encoder->is_ext_encoder = true;
3546 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3547 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3548 					 DRM_MODE_ENCODER_LVDS, NULL);
3549 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3550 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3551 					 DRM_MODE_ENCODER_DAC, NULL);
3552 		else
3553 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3554 					 DRM_MODE_ENCODER_TMDS, NULL);
3555 		drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
3556 		break;
3557 	}
3558 }
3559 
3560 static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
3561 	.bandwidth_update = &dce_v10_0_bandwidth_update,
3562 	.vblank_get_counter = &dce_v10_0_vblank_get_counter,
3563 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3564 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3565 	.hpd_sense = &dce_v10_0_hpd_sense,
3566 	.hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
3567 	.hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
3568 	.page_flip = &dce_v10_0_page_flip,
3569 	.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
3570 	.add_encoder = &dce_v10_0_encoder_add,
3571 	.add_connector = &amdgpu_connector_add,
3572 };
3573 
3574 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
3575 {
3576 	adev->mode_info.funcs = &dce_v10_0_display_funcs;
3577 }
3578 
3579 static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
3580 	.set = dce_v10_0_set_crtc_irq_state,
3581 	.process = dce_v10_0_crtc_irq,
3582 };
3583 
3584 static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
3585 	.set = dce_v10_0_set_pageflip_irq_state,
3586 	.process = dce_v10_0_pageflip_irq,
3587 };
3588 
3589 static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
3590 	.set = dce_v10_0_set_hpd_irq_state,
3591 	.process = dce_v10_0_hpd_irq,
3592 };
3593 
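/**
 * dce_v10_0_set_irq_funcs - set up the interrupt source callbacks
 *
 * @adev: amdgpu_device pointer
 *
 * Sets the number of interrupt types and the set/process callbacks
 * for the crtc, pageflip and hpd interrupt sources.
 */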
3594 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
3595 {
3596 	if (adev->mode_info.num_crtc > 0)
3597 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3598 	else
3599 		adev->crtc_irq.num_types = 0;
3600 	adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
3601 
3602 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3603 	adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
3604 
3605 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3606 	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
3607 }
3608 
3609 const struct amdgpu_ip_block_version dce_v10_0_ip_block =
3610 {
3611 	.type = AMD_IP_BLOCK_TYPE_DCE,
3612 	.major = 10,
3613 	.minor = 0,
3614 	.rev = 0,
3615 	.funcs = &dce_v10_0_ip_funcs,
3616 };
3617 
3618 const struct amdgpu_ip_block_version dce_v10_1_ip_block =
3619 {
3620 	.type = AMD_IP_BLOCK_TYPE_DCE,
3621 	.major = 10,
3622 	.minor = 1,
3623 	.rev = 0,
3624 	.funcs = &dce_v10_0_ip_funcs,
3625 };
3626