/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"

#include "dce_v6_0.h"
#include "sid.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"

#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"

#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

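/*
 * The first six entries mirror the CRTC register offsets; the last one
 * covers an extra DIG block, with its byte-address delta from the CRTC0
 * base shifted right by two to form a dword register offset.
 */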
static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
	       reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * Programs the new scanout address; the double buffered update then
 * takes place at the next vblank, or at the next hsync when @async
 * is set.
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update pitch */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
				 int hpd)
{
	u32 tmp;

	if (hpd >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", hpd);
		return;
	}

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS: it would
			 * break the aux dp channel on iMacs. Leaving it off
			 * helps (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

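	/* Sample the HV counters of the enabled CRTCs up to ten times,
	 * 100 us apart; a CRTC whose count never advances is considered
	 * hung. */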
	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(mmVGA_RENDER_CONTROL,
		       RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK);
}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		return 6;
	case CHIP_OLAND:
		return 2;
	default:
		return 0;
	}
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable the VGA render and any enabled CRTCs if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/* disable the CRTCs */
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
		else
			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

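	/* NOOFCHAN is an encoded field, not a raw count; map it to the
	 * actual number of channels */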
	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
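	/* each channel is treated as 4 bytes (32 bits) wide here */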
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

650
651 /**
652 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
653 *
654 * @wm: watermark calculation data
655 *
656 * Calculate the dmif bandwidth used for display (CIK).
657 * Used for display watermark bandwidth calculations
658 * Returns the dmif bandwidth in MBytes/s
659 */
dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params * wm)660 static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
661 {
662 /* Calculate the DMIF Request Bandwidth */
663 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
664 fixed20_12 disp_clk, bandwidth;
665 fixed20_12 a, b;
666
667 a.full = dfixed_const(1000);
668 disp_clk.full = dfixed_const(wm->disp_clk);
669 disp_clk.full = dfixed_div(disp_clk, a);
670 a.full = dfixed_const(32);
671 b.full = dfixed_mul(a, disp_clk);
672
673 a.full = dfixed_const(10);
674 disp_clk_request_efficiency.full = dfixed_const(8);
675 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
676
677 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
678
679 return dfixed_trunc(bandwidth);
680 }
681
/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
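	/* worst-case times (in ns) to return a 512 * 8 byte chunk and a
	 * 128 * 4 byte cursor line pair at the available bandwidth (MB/s) */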
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min_t(u32, line_time, 65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min_t(u32, dce_v6_0_latency_watermark(&wm_high), 65535);
		/* set for low clocks */
		latency_watermark_b = min_t(u32, dce_v6_0_latency_watermark(&wm_low), 65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

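		/* convert each latency watermark into a priority mark: the
		 * number of 16-pixel groups scanned out during the latency
		 * period at the current pixel clock, corrected for
		 * horizontal scaling */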
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;

	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/* watermark setup */
/**
 * dce_v6_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 * @other_mode: the display mode of another display controller
 * that may be sharing the line buffer
 *
 * Set up the line buffer allocation for
 * the selected display controller (SI).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 * 0 - half lb
	 * 2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
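		/* a nonzero PORT_CONNECTIVITY field reports the port as not
		 * connected */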
		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
				  PORT_CONNECTIVITY))
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}

}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
			     dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	int interlace = 0;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;

	if (connector->latency_present[interlace]) {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u8 *sadb = NULL;
	int sad_count;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    HDMI_CONNECTION, 0);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    DP_CONNECTION, 0);

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    DP_CONNECTION, 1);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    HDMI_CONNECTION, 1);

	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, 5); /* stereo */

	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
	if (sad_count < 0)
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;

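	/* for each supported coding type, program the descriptor from the
	 * SAD with the highest channel count; for PCM, also accumulate the
	 * stereo supported-frequency bits */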
	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	default:
		adev->mode_info.audio.num_pins = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.audio.num_pins = 2;
		break;
	}

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	adev->mode_info.audio.enabled = false;
}

static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
				   uint32_t clock, int bpc)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
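	/* presumably: ACR source 0 selects the hardware-generated CTS for
	 * deep color (bpc > 8), source 1 the CTS/N values programmed below */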
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
			    bpc > 8 ? 0 : 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
					     struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct hdmi_avi_infoframe frame;
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	uint8_t *payload = buffer + 3;
	uint8_t *header = buffer;
	ssize_t err;
	u32 tmp;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

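	/* write the packed infoframe bytes (checksum + payload), plus the
	 * version byte from the header, into the four AVI_INFO registers */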
	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
			    HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}
1557
dce_v6_0_audio_set_dto(struct drm_encoder * encoder,u32 clock)1558 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1559 {
1560 struct drm_device *dev = encoder->dev;
1561 struct amdgpu_device *adev = drm_to_adev(dev);
1562 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1563 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1564 u32 tmp;
1565
1566 /*
1567 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
1568 * Express [24MHz / target pixel clock] as an exact rational
1569 * number (coefficient of two integer numbers). DCCG_AUDIO_DTOx_PHASE
1570 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1571 */
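/*
 * E.g. (illustrative values, derived from the code below): a 148.5 MHz
 * 1080p60 pixel clock is programmed as PHASE = 24000, MODULE = 148500,
 * i.e. the ratio 24 MHz / 148.5 MHz with both terms in kHz, since
 * mode->clock is given in kHz.
 */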
1572 tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1573 tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1574 DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
1575 if (em == ATOM_ENCODER_MODE_HDMI) {
1576 tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1577 DCCG_AUDIO_DTO_SEL, 0);
1578 } else if (ENCODER_MODE_IS_DP(em)) {
1579 tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1580 DCCG_AUDIO_DTO_SEL, 1);
1581 }
1582 WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1583 if (em == ATOM_ENCODER_MODE_HDMI) {
1584 WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
1585 WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
1586 } else if (ENCODER_MODE_IS_DP(em)) {
1587 WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
1588 WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
1589 }
1590 }
1591
1592 static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1593 {
1594 struct drm_device *dev = encoder->dev;
1595 struct amdgpu_device *adev = drm_to_adev(dev);
1596 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1597 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1598 u32 tmp;
1599
1600 tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1601 tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1602 WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1603
1604 tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1605 tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1606 WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1607
1608 tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1609 tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1610 WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1611
1612 tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1613 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1614 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1615 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1616 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1617 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1618 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1619 WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1620
1621 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1622 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1623 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1624
1625 tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1626 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1627 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1628 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1629
1630 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1631 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1632 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1633 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1634 }
1635
1636 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1637 {
1638 struct drm_device *dev = encoder->dev;
1639 struct amdgpu_device *adev = drm_to_adev(dev);
1640 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1641 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1642 u32 tmp;
1643
1644 tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1645 tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1646 WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1647 }
1648
1649 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1650 {
1651 struct drm_device *dev = encoder->dev;
1652 struct amdgpu_device *adev = drm_to_adev(dev);
1653 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1654 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1655 u32 tmp;
1656
1657 if (enable) {
1658 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1659 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1660 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1661 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1662 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1663 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1664
1665 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1666 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1667 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1668
1669 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1670 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1671 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1672 } else {
1673 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1674 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1675 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1676 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1677 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1678 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1679
1680 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1681 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1682 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1683 }
1684 }
1685
1686 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1687 {
1688 struct drm_device *dev = encoder->dev;
1689 struct amdgpu_device *adev = drm_to_adev(dev);
1690 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1691 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1692 u32 tmp;
1693
1694 if (enable) {
1695 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1696 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1697 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1698
1699 tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1700 tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1701 WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1702
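/* Enable the DP secondary-data packet paths: ASP (audio sample
 * packets), ATP (audio timestamp packets) and AIP (audio infoframe
 * packets); the expansions are our reading of the field names.
 */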
1703 tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1704 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1705 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1706 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1707 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1708 WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1709 } else {
1710 WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1711 }
1712 }
1713
1714 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1715 struct drm_display_mode *mode)
1716 {
1717 struct drm_device *dev = encoder->dev;
1718 struct amdgpu_device *adev = drm_to_adev(dev);
1719 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1720 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1721 struct drm_connector *connector;
1722 struct drm_connector_list_iter iter;
1723 struct amdgpu_connector *amdgpu_connector = NULL;
1724 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1725 int bpc = 8;
1726
1727 if (!dig || !dig->afmt)
1728 return;
1729
1730 drm_connector_list_iter_begin(dev, &iter);
1731 drm_for_each_connector_iter(connector, &iter) {
1732 if (connector->encoder == encoder) {
1733 amdgpu_connector = to_amdgpu_connector(connector);
1734 break;
1735 }
1736 }
1737 drm_connector_list_iter_end(&iter);
1738
1739 if (!amdgpu_connector) {
1740 DRM_ERROR("Couldn't find encoder's connector\n");
1741 return;
1742 }
1743
1744 if (!dig->afmt->enabled)
1745 return;
1746
1747 dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1748 if (!dig->afmt->pin)
1749 return;
1750
1751 if (encoder->crtc) {
1752 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1753 bpc = amdgpu_crtc->bpc;
1754 }
1755
1756 /* disable audio before setting up hw */
1757 dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1758
1759 dce_v6_0_audio_set_mute(encoder, true);
1760 dce_v6_0_audio_write_speaker_allocation(encoder);
1761 dce_v6_0_audio_write_sad_regs(encoder);
1762 dce_v6_0_audio_write_latency_fields(encoder, mode);
1763 if (em == ATOM_ENCODER_MODE_HDMI) {
1764 dce_v6_0_audio_set_dto(encoder, mode->clock);
1765 dce_v6_0_audio_set_vbi_packet(encoder);
1766 dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1767 } else if (ENCODER_MODE_IS_DP(em)) {
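/* adev->clock.default_dispclk is in 10 kHz units, so scale by 10
 * to get kHz and match the units of mode->clock used above.
 */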
1768 dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1769 }
1770 dce_v6_0_audio_set_packet(encoder);
1771 dce_v6_0_audio_select_pin(encoder);
1772 dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1773 dce_v6_0_audio_set_mute(encoder, false);
1774 if (em == ATOM_ENCODER_MODE_HDMI) {
1775 dce_v6_0_audio_hdmi_enable(encoder, true);
1776 } else if (ENCODER_MODE_IS_DP(em)) {
1777 dce_v6_0_audio_dp_enable(encoder, true);
1778 }
1779
1780 /* enable audio after setting up hw */
1781 dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1782 }
1783
1784 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1785 {
1786 struct drm_device *dev = encoder->dev;
1787 struct amdgpu_device *adev = drm_to_adev(dev);
1788 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1789 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1790
1791 if (!dig || !dig->afmt)
1792 return;
1793
1794 /* Silent, r600_hdmi_enable will raise WARN for us */
1795 if (enable && dig->afmt->enabled)
1796 return;
1797
1798 if (!enable && !dig->afmt->enabled)
1799 return;
1800
1801 if (!enable && dig->afmt->pin) {
1802 dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1803 dig->afmt->pin = NULL;
1804 }
1805
1806 dig->afmt->enabled = enable;
1807
1808 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1809 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1810 }
1811
1812 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1813 {
1814 int i, j;
1815
1816 for (i = 0; i < adev->mode_info.num_dig; i++)
1817 adev->mode_info.afmt[i] = NULL;
1818
1819 /* DCE6 has audio blocks tied to DIG encoders */
1820 for (i = 0; i < adev->mode_info.num_dig; i++) {
1821 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1822 if (adev->mode_info.afmt[i]) {
1823 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1824 adev->mode_info.afmt[i]->id = i;
1825 } else {
1826 for (j = 0; j < i; j++) {
1827 kfree(adev->mode_info.afmt[j]);
1828 adev->mode_info.afmt[j] = NULL;
1829 }
1830 DRM_ERROR("Out of memory allocating afmt table\n");
1831 return -ENOMEM;
1832 }
1833 }
1834 return 0;
1835 }
1836
1837 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1838 {
1839 int i;
1840
1841 for (i = 0; i < adev->mode_info.num_dig; i++) {
1842 kfree(adev->mode_info.afmt[i]);
1843 adev->mode_info.afmt[i] = NULL;
1844 }
1845 }
1846
1847 static const u32 vga_control_regs[6] =
1848 {
1849 mmD1VGA_CONTROL,
1850 mmD2VGA_CONTROL,
1851 mmD3VGA_CONTROL,
1852 mmD4VGA_CONTROL,
1853 mmD5VGA_CONTROL,
1854 mmD6VGA_CONTROL,
1855 };
1856
1857 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1858 {
1859 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1860 struct drm_device *dev = crtc->dev;
1861 struct amdgpu_device *adev = drm_to_adev(dev);
1862 u32 vga_control;
1863
1864 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1865 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1866 }
1867
1868 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1869 {
1870 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1871 struct drm_device *dev = crtc->dev;
1872 struct amdgpu_device *adev = drm_to_adev(dev);
1873
1874 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1875 }
1876
1877 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1878 struct drm_framebuffer *fb,
1879 int x, int y)
1880 {
1881 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1882 struct drm_device *dev = crtc->dev;
1883 struct amdgpu_device *adev = drm_to_adev(dev);
1884 struct drm_framebuffer *target_fb;
1885 struct drm_gem_object *obj;
1886 struct amdgpu_bo *abo;
1887 uint64_t fb_location, tiling_flags;
1888 uint32_t fb_format, fb_pitch_pixels, pipe_config;
1889 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1890 u32 viewport_w, viewport_h;
1891 int r;
1892 bool bypass_lut = false;
1893
1894 /* no fb bound */
1895 if (!crtc->primary->fb) {
1896 DRM_DEBUG_KMS("No FB bound\n");
1897 return 0;
1898 }
1899
1900 target_fb = crtc->primary->fb;
1901
1902 /* If atomic, assume fb object is pinned & idle & fenced and
1903 * just update base pointers
1904 */
1905 obj = target_fb->obj[0];
1906 abo = gem_to_amdgpu_bo(obj);
1907 r = amdgpu_bo_reserve(abo, false);
1908 if (unlikely(r != 0))
1909 return r;
1910
1911 abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1912 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1913 if (unlikely(r != 0)) {
1914 amdgpu_bo_unreserve(abo);
1915 return -EINVAL;
1916 }
1917 fb_location = amdgpu_bo_gpu_offset(abo);
1918
1919 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1920 amdgpu_bo_unreserve(abo);
1921
1922 switch (target_fb->format->format) {
1923 case DRM_FORMAT_C8:
1924 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1925 (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1926 break;
1927 case DRM_FORMAT_XRGB4444:
1928 case DRM_FORMAT_ARGB4444:
1929 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1930 (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1931 #ifdef __BIG_ENDIAN
1932 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1933 #endif
1934 break;
1935 case DRM_FORMAT_XRGB1555:
1936 case DRM_FORMAT_ARGB1555:
1937 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1938 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1939 #ifdef __BIG_ENDIAN
1940 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1941 #endif
1942 break;
1943 case DRM_FORMAT_BGRX5551:
1944 case DRM_FORMAT_BGRA5551:
1945 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1946 (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1947 #ifdef __BIG_ENDIAN
1948 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1949 #endif
1950 break;
1951 case DRM_FORMAT_RGB565:
1952 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1953 (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1954 #ifdef __BIG_ENDIAN
1955 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1956 #endif
1957 break;
1958 case DRM_FORMAT_XRGB8888:
1959 case DRM_FORMAT_ARGB8888:
1960 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1961 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1962 #ifdef __BIG_ENDIAN
1963 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1964 #endif
1965 break;
1966 case DRM_FORMAT_XRGB2101010:
1967 case DRM_FORMAT_ARGB2101010:
1968 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1969 (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1970 #ifdef __BIG_ENDIAN
1971 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1972 #endif
1973 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1974 bypass_lut = true;
1975 break;
1976 case DRM_FORMAT_BGRX1010102:
1977 case DRM_FORMAT_BGRA1010102:
1978 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1979 (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1980 #ifdef __BIG_ENDIAN
1981 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1982 #endif
1983 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1984 bypass_lut = true;
1985 break;
1986 case DRM_FORMAT_XBGR8888:
1987 case DRM_FORMAT_ABGR8888:
1988 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1989 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1990 fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
1991 (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
1992 #ifdef __BIG_ENDIAN
1993 fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1994 #endif
1995 break;
1996 default:
1997 DRM_ERROR("Unsupported screen format %p4cc\n",
1998 &target_fb->format->format);
1999 return -EINVAL;
2000 }
2001
2002 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2003 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2004
2005 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2006 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2007 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2008 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2009 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2010
2011 fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
2012 fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2013 fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
2014 fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
2015 fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
2016 fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
2017 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2018 fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
2019 }
2020
2021 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2022 fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
2023
2024 dce_v6_0_vga_enable(crtc, false);
2025
2026 /* Make sure surface address is updated at vertical blank rather than
2027 * horizontal blank
2028 */
2029 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
2030
2031 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2032 upper_32_bits(fb_location));
2033 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2034 upper_32_bits(fb_location));
2035 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2036 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2037 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2038 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2039 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2040 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2041
2042 /*
2043 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2044 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2045 * retain the full precision throughout the pipeline.
2046 */
2047 WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
2048 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
2049 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
2050
2051 if (bypass_lut)
2052 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2053
2054 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2055 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2056 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2057 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2058 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2059 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2060
2061 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
2062 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2063
2064 dce_v6_0_grph_enable(crtc, true);
2065
2066 WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2067 target_fb->height);
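/* keep the viewport start aligned: x to a 4-pixel boundary and y to
 * an even line, which the masking below suggests the display hw
 * requires.
 */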
2068 x &= ~3;
2069 y &= ~1;
2070 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2071 (x << 16) | y);
2072 viewport_w = crtc->mode.hdisplay;
2073 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2074
2075 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2076 (viewport_w << 16) | viewport_h);
2077
2078 /* set pageflip to happen anywhere in vblank interval */
2079 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2080
2081 if (fb && fb != crtc->primary->fb) {
2082 abo = gem_to_amdgpu_bo(fb->obj[0]);
2083 r = amdgpu_bo_reserve(abo, true);
2084 if (unlikely(r != 0))
2085 return r;
2086 amdgpu_bo_unpin(abo);
2087 amdgpu_bo_unreserve(abo);
2088 }
2089
2090 /* Bytes per pixel may have changed */
2091 dce_v6_0_bandwidth_update(adev);
2092
2093 return 0;
2094
2095 }
2096
2097 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2098 struct drm_display_mode *mode)
2099 {
2100 struct drm_device *dev = crtc->dev;
2101 struct amdgpu_device *adev = drm_to_adev(dev);
2102 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2103
2104 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2105 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2106 DATA_FORMAT__INTERLEAVE_EN_MASK);
2107 else
2108 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2109 }
2110
2111 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2112 {
2113 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2114 struct drm_device *dev = crtc->dev;
2115 struct amdgpu_device *adev = drm_to_adev(dev);
2116 u16 *r, *g, *b;
2117 int i;
2118
2119 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2120
2121 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2122 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2123 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2124 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2125 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2126 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2127 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2128 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2129 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2130 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2131
2132 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2133
2134 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2135 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2136 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2137
2138 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2139 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2140 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2141
2142 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2143 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2144
2145 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2146 r = crtc->gamma_store;
2147 g = r + crtc->gamma_size;
2148 b = g + crtc->gamma_size;
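/* gamma_store holds 16 bits per channel; keep the top 10 bits of
 * each and pack them into one 30-bit word: R in bits 29:20, G in
 * bits 19:10, B in bits 9:0.
 */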
2149 for (i = 0; i < 256; i++) {
2150 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2151 ((*r++ & 0xffc0) << 14) |
2152 ((*g++ & 0xffc0) << 4) |
2153 (*b++ >> 6));
2154 }
2155
2156 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2157 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2158 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2159 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) |
2160 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2161 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2162 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2163 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2164 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2165 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2166 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2167 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2168 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2169 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2170 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
2171 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2172
2173
2174 }
2175
2176 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2177 {
2178 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2179 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2180
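/* Map UNIPHY instance and link to a DIG encoder index:
 * UNIPHY0 A/B -> DIG0/1, UNIPHY1 A/B -> DIG2/3,
 * UNIPHY2 A/B -> DIG4/5, UNIPHY3 -> DIG6.
 */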
2181 switch (amdgpu_encoder->encoder_id) {
2182 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2183 return dig->linkb ? 1 : 0;
2184 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2185 return dig->linkb ? 3 : 2;
2186 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2187 return dig->linkb ? 5 : 4;
2188 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2189 return 6;
2190 default:
2191 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2192 return 0;
2193 }
2194 }
2195
2196 /**
2197 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2198 *
2199 * @crtc: drm crtc
2200 *
2201 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
2202 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
2203 * monitors a dedicated PPLL must be used. If a particular board has
2204 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2205 * as there is no need to program the PLL itself. If we are not able to
2206 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2207 * avoid messing up an existing monitor.
2208 *
2210 */
2211 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2212 {
2213 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2214 struct drm_device *dev = crtc->dev;
2215 struct amdgpu_device *adev = drm_to_adev(dev);
2216 u32 pll_in_use;
2217 int pll;
2218
2219 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2220 if (adev->clock.dp_extclk)
2221 /* skip PPLL programming if using ext clock */
2222 return ATOM_PPLL_INVALID;
2223 else
2224 return ATOM_PPLL0;
2225 } else {
2226 /* use the same PPLL for all monitors with the same clock */
2227 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2228 if (pll != ATOM_PPLL_INVALID)
2229 return pll;
2230 }
2231
2232 /* PPLL1, and PPLL2 */
2233 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2234 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2235 return ATOM_PPLL2;
2236 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2237 return ATOM_PPLL1;
2238 DRM_ERROR("unable to allocate a PPLL\n");
2239 return ATOM_PPLL_INVALID;
2240 }
2241
2242 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2243 {
2244 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2245 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2246 uint32_t cur_lock;
2247
2248 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2249 if (lock)
2250 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2251 else
2252 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2253 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2254 }
2255
2256 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2257 {
2258 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2259 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2260
2261 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2262 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2263 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2264 }
2265
2266 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2267 {
2268 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2269 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2270
2271 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2272 upper_32_bits(amdgpu_crtc->cursor_addr));
2273 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2274 lower_32_bits(amdgpu_crtc->cursor_addr));
2275
2276 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2277 CUR_CONTROL__CURSOR_EN_MASK |
2278 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2279 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2280 }
2281
2282 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2283 int x, int y)
2284 {
2285 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2286 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2287 int xorigin = 0, yorigin = 0;
2288
2289 int w = amdgpu_crtc->cursor_width;
2290
2291 amdgpu_crtc->cursor_x = x;
2292 amdgpu_crtc->cursor_y = y;
2293
2294 /* avivo cursors are offset into the total surface */
2295 x += crtc->x;
2296 y += crtc->y;
2297 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2298
2299 if (x < 0) {
2300 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2301 x = 0;
2302 }
2303 if (y < 0) {
2304 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2305 y = 0;
2306 }
2307
2308 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2309 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2310 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2311 ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2312
2313 return 0;
2314 }
2315
2316 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2317 int x, int y)
2318 {
2319 int ret;
2320
2321 dce_v6_0_lock_cursor(crtc, true);
2322 ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2323 dce_v6_0_lock_cursor(crtc, false);
2324
2325 return ret;
2326 }
2327
2328 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2329 struct drm_file *file_priv,
2330 uint32_t handle,
2331 uint32_t width,
2332 uint32_t height,
2333 int32_t hot_x,
2334 int32_t hot_y)
2335 {
2336 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2337 struct drm_gem_object *obj;
2338 struct amdgpu_bo *aobj;
2339 int ret;
2340
2341 if (!handle) {
2342 /* turn off cursor */
2343 dce_v6_0_hide_cursor(crtc);
2344 obj = NULL;
2345 goto unpin;
2346 }
2347
2348 if ((width > amdgpu_crtc->max_cursor_width) ||
2349 (height > amdgpu_crtc->max_cursor_height)) {
2350 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2351 return -EINVAL;
2352 }
2353
2354 obj = drm_gem_object_lookup(file_priv, handle);
2355 if (!obj) {
2356 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2357 return -ENOENT;
2358 }
2359
2360 aobj = gem_to_amdgpu_bo(obj);
2361 ret = amdgpu_bo_reserve(aobj, false);
2362 if (ret != 0) {
2363 drm_gem_object_put(obj);
2364 return ret;
2365 }
2366
2367 aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2368 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2369 amdgpu_bo_unreserve(aobj);
2370 if (ret) {
2371 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2372 drm_gem_object_put(obj);
2373 return ret;
2374 }
2375 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2376
2377 dce_v6_0_lock_cursor(crtc, true);
2378
2379 if (width != amdgpu_crtc->cursor_width ||
2380 height != amdgpu_crtc->cursor_height ||
2381 hot_x != amdgpu_crtc->cursor_hot_x ||
2382 hot_y != amdgpu_crtc->cursor_hot_y) {
2383 int x, y;
2384
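/* shift the cursor position so the point under the old hotspot
 * stays fixed on screen when the hotspot offset changes */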
2385 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2386 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2387
2388 dce_v6_0_cursor_move_locked(crtc, x, y);
2389
2390 amdgpu_crtc->cursor_width = width;
2391 amdgpu_crtc->cursor_height = height;
2392 amdgpu_crtc->cursor_hot_x = hot_x;
2393 amdgpu_crtc->cursor_hot_y = hot_y;
2394 }
2395
2396 dce_v6_0_show_cursor(crtc);
2397 dce_v6_0_lock_cursor(crtc, false);
2398
2399 unpin:
2400 if (amdgpu_crtc->cursor_bo) {
2401 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2402 ret = amdgpu_bo_reserve(aobj, true);
2403 if (likely(ret == 0)) {
2404 amdgpu_bo_unpin(aobj);
2405 amdgpu_bo_unreserve(aobj);
2406 }
2407 drm_gem_object_put(amdgpu_crtc->cursor_bo);
2408 }
2409
2410 amdgpu_crtc->cursor_bo = obj;
2411 return 0;
2412 }
2413
2414 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2415 {
2416 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2417
2418 if (amdgpu_crtc->cursor_bo) {
2419 dce_v6_0_lock_cursor(crtc, true);
2420
2421 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2422 amdgpu_crtc->cursor_y);
2423
2424 dce_v6_0_show_cursor(crtc);
2425 dce_v6_0_lock_cursor(crtc, false);
2426 }
2427 }
2428
2429 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2430 u16 *blue, uint32_t size,
2431 struct drm_modeset_acquire_ctx *ctx)
2432 {
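/* the DRM core has already copied the new ramp into
 * crtc->gamma_store, so just flush it to the hardware LUT */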
2433 dce_v6_0_crtc_load_lut(crtc);
2434
2435 return 0;
2436 }
2437
2438 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2439 {
2440 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2441
2442 drm_crtc_cleanup(crtc);
2443 kfree(amdgpu_crtc);
2444 }
2445
2446 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2447 .cursor_set2 = dce_v6_0_crtc_cursor_set2,
2448 .cursor_move = dce_v6_0_crtc_cursor_move,
2449 .gamma_set = dce_v6_0_crtc_gamma_set,
2450 .set_config = amdgpu_display_crtc_set_config,
2451 .destroy = dce_v6_0_crtc_destroy,
2452 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2453 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
2454 .enable_vblank = amdgpu_enable_vblank_kms,
2455 .disable_vblank = amdgpu_disable_vblank_kms,
2456 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2457 };
2458
2459 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2460 {
2461 struct drm_device *dev = crtc->dev;
2462 struct amdgpu_device *adev = drm_to_adev(dev);
2463 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2464 unsigned type;
2465
2466 switch (mode) {
2467 case DRM_MODE_DPMS_ON:
2468 amdgpu_crtc->enabled = true;
2469 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2470 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2471 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2472 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2473 amdgpu_crtc->crtc_id);
2474 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2475 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2476 drm_crtc_vblank_on(crtc);
2477 dce_v6_0_crtc_load_lut(crtc);
2478 break;
2479 case DRM_MODE_DPMS_STANDBY:
2480 case DRM_MODE_DPMS_SUSPEND:
2481 case DRM_MODE_DPMS_OFF:
2482 drm_crtc_vblank_off(crtc);
2483 if (amdgpu_crtc->enabled)
2484 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2485 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2486 amdgpu_crtc->enabled = false;
2487 break;
2488 }
2489 /* adjust pm to dpms */
2490 amdgpu_dpm_compute_clocks(adev);
2491 }
2492
2493 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2494 {
2495 /* disable crtc pair power gating before programming */
2496 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2497 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2498 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2499 }
2500
2501 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2502 {
2503 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2504 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2505 }
2506
2507 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2508 {
2509
2510 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2511 struct drm_device *dev = crtc->dev;
2512 struct amdgpu_device *adev = drm_to_adev(dev);
2513 struct amdgpu_atom_ss ss;
2514 int i;
2515
2516 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2517 if (crtc->primary->fb) {
2518 int r;
2519 struct amdgpu_bo *abo;
2520
2521 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2522 r = amdgpu_bo_reserve(abo, true);
2523 if (unlikely(r))
2524 DRM_ERROR("failed to reserve abo before unpin\n");
2525 else {
2526 amdgpu_bo_unpin(abo);
2527 amdgpu_bo_unreserve(abo);
2528 }
2529 }
2530 /* disable the GRPH */
2531 dce_v6_0_grph_enable(crtc, false);
2532
2533 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2534
2535 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2536 if (adev->mode_info.crtcs[i] &&
2537 adev->mode_info.crtcs[i]->enabled &&
2538 i != amdgpu_crtc->crtc_id &&
2539 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2540 /* another crtc is using this pll, don't turn
2541 * off the pll
2542 */
2543 goto done;
2544 }
2545 }
2546
2547 switch (amdgpu_crtc->pll_id) {
2548 case ATOM_PPLL1:
2549 case ATOM_PPLL2:
2550 /* disable the ppll */
2551 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2552 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2553 break;
2554 default:
2555 break;
2556 }
2557 done:
2558 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2559 amdgpu_crtc->adjusted_clock = 0;
2560 amdgpu_crtc->encoder = NULL;
2561 amdgpu_crtc->connector = NULL;
2562 }
2563
2564 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2565 struct drm_display_mode *mode,
2566 struct drm_display_mode *adjusted_mode,
2567 int x, int y, struct drm_framebuffer *old_fb)
2568 {
2569 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2570
2571 if (!amdgpu_crtc->adjusted_clock)
2572 return -EINVAL;
2573
2574 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2575 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2576 dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y);
2577 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2578 amdgpu_atombios_crtc_scaler_setup(crtc);
2579 dce_v6_0_cursor_reset(crtc);
2580 /* update the hw version for dpm */
2581 amdgpu_crtc->hw_mode = *adjusted_mode;
2582
2583 return 0;
2584 }
2585
2586 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2587 const struct drm_display_mode *mode,
2588 struct drm_display_mode *adjusted_mode)
2589 {
2590 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2591 struct drm_device *dev = crtc->dev;
2592 struct drm_encoder *encoder;
2593
2594 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2595 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2596 if (encoder->crtc == crtc) {
2597 amdgpu_crtc->encoder = encoder;
2598 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2599 break;
2600 }
2601 }
2602 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2603 amdgpu_crtc->encoder = NULL;
2604 amdgpu_crtc->connector = NULL;
2605 return false;
2606 }
2607 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2608 return false;
2609 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2610 return false;
2611 /* pick pll */
2612 amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2613 /* if we can't get a PPLL for a non-DP encoder, fail */
2614 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2615 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2616 return false;
2617
2618 return true;
2619 }
2620
2621 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2622 struct drm_framebuffer *old_fb)
2623 {
2624 return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y);
2625 }
2626
2627 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2628 .dpms = dce_v6_0_crtc_dpms,
2629 .mode_fixup = dce_v6_0_crtc_mode_fixup,
2630 .mode_set = dce_v6_0_crtc_mode_set,
2631 .mode_set_base = dce_v6_0_crtc_set_base,
2632 .prepare = dce_v6_0_crtc_prepare,
2633 .commit = dce_v6_0_crtc_commit,
2634 .disable = dce_v6_0_crtc_disable,
2635 .get_scanout_position = amdgpu_crtc_get_scanout_position,
2636 };
2637
2638 static void dce_v6_0_panic_flush(struct drm_plane *plane)
2639 {
2640 struct drm_framebuffer *fb;
2641 struct amdgpu_crtc *amdgpu_crtc;
2642 struct amdgpu_device *adev;
2643 uint32_t fb_format;
2644
2645 if (!plane->fb)
2646 return;
2647
2648 fb = plane->fb;
2649 amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
2650 adev = drm_to_adev(fb->dev);
2651
2652 /* Disable DC tiling */
2653 fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
2654 fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
2655 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2656
2657 }
2658
2659 static const struct drm_plane_helper_funcs dce_v6_0_drm_primary_plane_helper_funcs = {
2660 .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
2661 .panic_flush = dce_v6_0_panic_flush,
2662 };
2663
2664 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2665 {
2666 struct amdgpu_crtc *amdgpu_crtc;
2667
2668 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2669 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2670 if (amdgpu_crtc == NULL)
2671 return -ENOMEM;
2672
2673 drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2674
2675 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2676 amdgpu_crtc->crtc_id = index;
2677 adev->mode_info.crtcs[index] = amdgpu_crtc;
2678
2679 amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2680 amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2681 adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2682 adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2683
2684 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2685
2686 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2687 amdgpu_crtc->adjusted_clock = 0;
2688 amdgpu_crtc->encoder = NULL;
2689 amdgpu_crtc->connector = NULL;
2690 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2691 drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v6_0_drm_primary_plane_helper_funcs);
2692
2693 return 0;
2694 }
2695
2696 static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
2697 {
2698 struct amdgpu_device *adev = ip_block->adev;
2699
2700 adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2701 adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2702
2703 dce_v6_0_set_display_funcs(adev);
2704
2705 adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2706
2707 switch (adev->asic_type) {
2708 case CHIP_TAHITI:
2709 case CHIP_PITCAIRN:
2710 case CHIP_VERDE:
2711 adev->mode_info.num_hpd = 6;
2712 adev->mode_info.num_dig = 6;
2713 break;
2714 case CHIP_OLAND:
2715 adev->mode_info.num_hpd = 2;
2716 adev->mode_info.num_dig = 2;
2717 break;
2718 default:
2719 return -EINVAL;
2720 }
2721
2722 dce_v6_0_set_irq_funcs(adev);
2723
2724 return 0;
2725 }
2726
2727 static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
2728 {
2729 int r, i;
2730 struct amdgpu_device *adev = ip_block->adev;
2731
2732 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2733 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2734 if (r)
2735 return r;
2736 }
2737
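/* pageflip interrupts use the even legacy src_ids 8..18, one per
 * crtc; dce_v6_0_pageflip_irq() decodes them as (src_id - 8) >> 1 */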
2738 for (i = 8; i < 20; i += 2) {
2739 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2740 if (r)
2741 return r;
2742 }
2743
2744 /* HPD hotplug */
2745 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2746 if (r)
2747 return r;
2748
2749 adev->mode_info.mode_config_initialized = true;
2750
2751 adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2752 adev_to_drm(adev)->mode_config.async_page_flip = true;
2753 adev_to_drm(adev)->mode_config.max_width = 16384;
2754 adev_to_drm(adev)->mode_config.max_height = 16384;
2755 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2756 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2757 adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2758
2759 r = amdgpu_display_modeset_create_props(adev);
2760 if (r)
2761 return r;
2762
2763 adev_to_drm(adev)->mode_config.max_width = 16384;
2764 adev_to_drm(adev)->mode_config.max_height = 16384;
2765
2766 /* allocate crtcs */
2767 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2768 r = dce_v6_0_crtc_init(adev, i);
2769 if (r)
2770 return r;
2771 }
2772
2773 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2774 amdgpu_display_print_display_setup(adev_to_drm(adev));
2775 else
2776 return -EINVAL;
2777
2778 /* setup afmt */
2779 r = dce_v6_0_afmt_init(adev);
2780 if (r)
2781 return r;
2782
2783 r = dce_v6_0_audio_init(adev);
2784 if (r)
2785 return r;
2786
2787 /* Disable vblank IRQs aggressively for power-saving */
2788 /* XXX: can this be enabled for DC? */
2789 adev_to_drm(adev)->vblank_disable_immediate = true;
2790
2791 r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2792 if (r)
2793 return r;
2794
2795 /* Pre-DCE11 */
2796 INIT_DELAYED_WORK(&adev->hotplug_work,
2797 amdgpu_display_hotplug_work_func);
2798
2799 drm_kms_helper_poll_init(adev_to_drm(adev));
2800
2801 return r;
2802 }
2803
2804 static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
2805 {
2806 struct amdgpu_device *adev = ip_block->adev;
2807
2808 drm_edid_free(adev->mode_info.bios_hardcoded_edid);
2809
2810 drm_kms_helper_poll_fini(adev_to_drm(adev));
2811
2812 dce_v6_0_audio_fini(adev);
2813 dce_v6_0_afmt_fini(adev);
2814
2815 drm_mode_config_cleanup(adev_to_drm(adev));
2816 adev->mode_info.mode_config_initialized = false;
2817
2818 return 0;
2819 }
2820
2821 static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
2822 {
2823 int i;
2824 struct amdgpu_device *adev = ip_block->adev;
2825
2826 /* disable vga render */
2827 dce_v6_0_set_vga_render_state(adev, false);
2828 /* init dig PHYs, disp eng pll */
2829 amdgpu_atombios_encoder_init_dig(adev);
2830 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2831
2832 /* initialize hpd */
2833 dce_v6_0_hpd_init(adev);
2834
2835 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2836 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2837 }
2838
2839 dce_v6_0_pageflip_interrupt_init(adev);
2840
2841 return 0;
2842 }
2843
2844 static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
2845 {
2846 int i;
2847 struct amdgpu_device *adev = ip_block->adev;
2848
2849 dce_v6_0_hpd_fini(adev);
2850
2851 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2852 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2853 }
2854
2855 dce_v6_0_pageflip_interrupt_fini(adev);
2856
2857 flush_delayed_work(&adev->hotplug_work);
2858
2859 return 0;
2860 }
2861
2862 static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block)
2863 {
2864 struct amdgpu_device *adev = ip_block->adev;
2865 int r;
2866
2867 r = amdgpu_display_suspend_helper(adev);
2868 if (r)
2869 return r;
2870 adev->mode_info.bl_level =
2871 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2872
2873 return dce_v6_0_hw_fini(ip_block);
2874 }
2875
2876 static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
2877 {
2878 struct amdgpu_device *adev = ip_block->adev;
2879 int ret;
2880
2881 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2882 adev->mode_info.bl_level);
2883
2884 ret = dce_v6_0_hw_init(ip_block);
2885
2886 /* turn on the BL */
2887 if (adev->mode_info.bl_encoder) {
2888 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2889 adev->mode_info.bl_encoder);
2890 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2891 bl_level);
2892 }
2893 if (ret)
2894 return ret;
2895
2896 return amdgpu_display_resume_helper(adev);
2897 }
2898
2899 static bool dce_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
2900 {
2901 return true;
2902 }
2903
2904 static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
2905 {
2906 u32 srbm_soft_reset = 0, tmp;
2907 struct amdgpu_device *adev = ip_block->adev;
2908
2909 if (dce_v6_0_is_display_hung(adev))
2910 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2911
2912 if (srbm_soft_reset) {
2913 tmp = RREG32(mmSRBM_SOFT_RESET);
2914 tmp |= srbm_soft_reset;
2915 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2916 WREG32(mmSRBM_SOFT_RESET, tmp);
2917 tmp = RREG32(mmSRBM_SOFT_RESET);
2918
2919 udelay(50);
2920
2921 tmp &= ~srbm_soft_reset;
2922 WREG32(mmSRBM_SOFT_RESET, tmp);
2923 tmp = RREG32(mmSRBM_SOFT_RESET);
2924
2925 /* Wait a little for things to settle down */
2926 udelay(50);
2927 }
2928 return 0;
2929 }
2930
2931 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2932 int crtc,
2933 enum amdgpu_interrupt_state state)
2934 {
2935 u32 reg_block, interrupt_mask;
2936
2937 if (crtc >= adev->mode_info.num_crtc) {
2938 DRM_DEBUG("invalid crtc %d\n", crtc);
2939 return;
2940 }
2941
2942 switch (crtc) {
2943 case 0:
2944 reg_block = CRTC0_REGISTER_OFFSET;
2945 break;
2946 case 1:
2947 reg_block = CRTC1_REGISTER_OFFSET;
2948 break;
2949 case 2:
2950 reg_block = CRTC2_REGISTER_OFFSET;
2951 break;
2952 case 3:
2953 reg_block = CRTC3_REGISTER_OFFSET;
2954 break;
2955 case 4:
2956 reg_block = CRTC4_REGISTER_OFFSET;
2957 break;
2958 case 5:
2959 reg_block = CRTC5_REGISTER_OFFSET;
2960 break;
2961 default:
2962 DRM_DEBUG("invalid crtc %d\n", crtc);
2963 return;
2964 }
2965
2966 switch (state) {
2967 case AMDGPU_IRQ_STATE_DISABLE:
2968 interrupt_mask = RREG32(mmINT_MASK + reg_block);
2969 interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK;
2970 WREG32(mmINT_MASK + reg_block, interrupt_mask);
2971 break;
2972 case AMDGPU_IRQ_STATE_ENABLE:
2973 interrupt_mask = RREG32(mmINT_MASK + reg_block);
2974 interrupt_mask |= INT_MASK__VBLANK_INT_MASK;
2975 WREG32(mmINT_MASK + reg_block, interrupt_mask);
2976 break;
2977 default:
2978 break;
2979 }
2980 }
2981
2982 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2983 int crtc,
2984 enum amdgpu_interrupt_state state)
2985 {
2986
2987 }
2988
2989 static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev,
2990 struct amdgpu_irq_src *src,
2991 unsigned hpd,
2992 enum amdgpu_interrupt_state state)
2993 {
2994 u32 dc_hpd_int_cntl;
2995
2996 if (hpd >= adev->mode_info.num_hpd) {
2997 DRM_DEBUG("invalid hpd %d\n", hpd);
2998 return 0;
2999 }
3000
3001 switch (state) {
3002 case AMDGPU_IRQ_STATE_DISABLE:
3003 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3004 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3005 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
3006 break;
3007 case AMDGPU_IRQ_STATE_ENABLE:
3008 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3009 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3010 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
3011 break;
3012 default:
3013 break;
3014 }
3015
3016 return 0;
3017 }
3018
3019 static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev,
3020 struct amdgpu_irq_src *src,
3021 unsigned type,
3022 enum amdgpu_interrupt_state state)
3023 {
3024 switch (type) {
3025 case AMDGPU_CRTC_IRQ_VBLANK1:
3026 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3027 break;
3028 case AMDGPU_CRTC_IRQ_VBLANK2:
3029 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3030 break;
3031 case AMDGPU_CRTC_IRQ_VBLANK3:
3032 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3033 break;
3034 case AMDGPU_CRTC_IRQ_VBLANK4:
3035 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3036 break;
3037 case AMDGPU_CRTC_IRQ_VBLANK5:
3038 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3039 break;
3040 case AMDGPU_CRTC_IRQ_VBLANK6:
3041 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3042 break;
3043 case AMDGPU_CRTC_IRQ_VLINE1:
3044 dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
3045 break;
3046 case AMDGPU_CRTC_IRQ_VLINE2:
3047 dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
3048 break;
3049 case AMDGPU_CRTC_IRQ_VLINE3:
3050 dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
3051 break;
3052 case AMDGPU_CRTC_IRQ_VLINE4:
3053 dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
3054 break;
3055 case AMDGPU_CRTC_IRQ_VLINE5:
3056 dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
3057 break;
3058 case AMDGPU_CRTC_IRQ_VLINE6:
3059 dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
3060 break;
3061 default:
3062 break;
3063 }
3064 return 0;
3065 }
3066
3067 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
3068 struct amdgpu_irq_src *source,
3069 struct amdgpu_iv_entry *entry)
3070 {
3071 unsigned crtc = entry->src_id - 1;
3072 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3073 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3074 crtc);
3075
3076 switch (entry->src_data[0]) {
3077 case 0: /* vblank */
3078 if (disp_int & interrupt_status_offsets[crtc].vblank)
3079 WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK);
3080 else
3081 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3082
3083 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3084 drm_handle_vblank(adev_to_drm(adev), crtc);
3085 }
3086 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3087 break;
3088 case 1: /* vline */
3089 if (disp_int & interrupt_status_offsets[crtc].vline)
3090 WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK);
3091 else
3092 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3093
3094 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3095 break;
3096 default:
3097 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3098 break;
3099 }
3100
3101 return 0;
3102 }
3103
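/*
 * Mask or unmask the per-CRTC page-flip (GRPH surface update)
 * interrupt via GRPH_INTERRUPT_CONTROL.
 */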
static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *src,
					   unsigned type,
					   enum amdgpu_interrupt_state state)
{
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}

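/*
 * Completion handler for page flips.  Per the (src_id - 8) >> 1
 * decoding below, the GRPH_PFLIP sources for D1..D6 occupy every
 * second src_id starting at 8.  Once the flip has landed, the latched
 * status bit is acked, the pending event is delivered to userspace and
 * the unpin work is scheduled.
 */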
static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;

	/* validate the CRTC index before using it to index the array */
	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return 0;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

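/*
 * Acknowledge a hot-plug detect interrupt and defer connector
 * re-probing to hotplug_work, since detection (EDID reads over i2c)
 * may sleep and must not run in interrupt context.
 */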
static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask;
	unsigned hpd;

	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
		return 0;
	}

	hpd = entry->src_data[0];
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		dce_v6_0_hpd_int_ack(adev, hpd);
		schedule_delayed_work(&adev->hotplug_work, 0);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
	.name = "dce_v6_0",
	.early_init = dce_v6_0_early_init,
	.sw_init = dce_v6_0_sw_init,
	.sw_fini = dce_v6_0_sw_fini,
	.hw_init = dce_v6_0_hw_init,
	.hw_fini = dce_v6_0_hw_fini,
	.suspend = dce_v6_0_suspend,
	.resume = dce_v6_0_resume,
	.is_idle = dce_v6_0_is_idle,
	.soft_reset = dce_v6_0_soft_reset,
	.set_clockgating_state = dce_v6_0_set_clockgating_state,
	.set_powergating_state = dce_v6_0_set_powergating_state,
};

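/*
 * Program an encoder for the adjusted mode.  The encoder is forced off
 * first (the CRTC routing is only known here, not in prepare()), and
 * the AFMT block is configured for HDMI and DP encoder modes.
 */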
static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
				      struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v6_0_set_interleave(encoder->crtc, mode);

	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
		dce_v6_0_afmt_enable(encoder, true);
		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
	}
}

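/*
 * Pre-modeset setup: pick a DIG encoder (and its AFMT block) for
 * digital outputs, route any i2c router to the display, power up eDP
 * panels and program the FMT (output bit-depth) block.  The ATOM
 * scratch regs are locked here and released again in commit().
 */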
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

		if (dig) {
			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v6_0_program_fmt(encoder);
}

static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

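/*
 * Power the encoder down and release its DIG encoder assignment
 * (dig_encoder = -1) so the slot can be reused by another output.
 */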
static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
			dce_v6_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
{

}

static void dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{

}

static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
{

}

static void dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
	.dpms = dce_v6_0_ext_dpms,
	.mode_fixup = dce_v6_0_ext_mode_fixup,
	.prepare = dce_v6_0_ext_prepare,
	.mode_set = dce_v6_0_ext_mode_set,
	.commit = dce_v6_0_ext_commit,
	.disable = dce_v6_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.disable = dce_v6_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
	.destroy = dce_v6_0_encoder_destroy,
};

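/*
 * Create (or update) the drm_encoder for an ATOM BIOS encoder object.
 * If the encoder_enum was already added, only the supported-device
 * mask is extended; otherwise a new encoder is allocated,
 * possible_crtcs is set as a bitmask of usable CRTCs, and the matching
 * funcs/helpers are installed based on the encoder object id.
 */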
static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;
	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
	.bandwidth_update = &dce_v6_0_bandwidth_update,
	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v6_0_hpd_sense,
	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
	.page_flip = &dce_v6_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v6_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
};

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
	adev->mode_info.funcs = &dce_v6_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
	.set = dce_v6_0_set_crtc_irq_state,
	.process = dce_v6_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
	.set = dce_v6_0_set_pageflip_irq_state,
	.process = dce_v6_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
	.set = dce_v6_0_set_hpd_irq_state,
	.process = dce_v6_0_hpd_irq,
};

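/*
 * Register the interrupt handlers.  crtc_irq.num_types spans both the
 * vblank and vline ranges of the amdgpu_crtc_irq enum, which is why it
 * is AMDGPU_CRTC_IRQ_VLINE1 (the first vline entry) plus one vline
 * slot per active CRTC.
 */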
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.num_crtc > 0)
		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
	else
		adev->crtc_irq.num_types = 0;
	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v6_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 4,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};