/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "dce_v8_0.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] = {
        CRTC0_REGISTER_OFFSET,
        CRTC1_REGISTER_OFFSET,
        CRTC2_REGISTER_OFFSET,
        CRTC3_REGISTER_OFFSET,
        CRTC4_REGISTER_OFFSET,
        CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] = {
        HPD0_REGISTER_OFFSET,
        HPD1_REGISTER_OFFSET,
        HPD2_REGISTER_OFFSET,
        HPD3_REGISTER_OFFSET,
        HPD4_REGISTER_OFFSET,
        HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
        CRTC0_REGISTER_OFFSET,
        CRTC1_REGISTER_OFFSET,
        CRTC2_REGISTER_OFFSET,
        CRTC3_REGISTER_OFFSET,
        CRTC4_REGISTER_OFFSET,
        CRTC5_REGISTER_OFFSET,
        (0x13830 - 0x7030) >> 2,
};

static const struct {
        uint32_t reg;
        uint32_t vblank;
        uint32_t vline;
        uint32_t hpd;

} interrupt_status_offsets[6] = { {
        .reg = mmDISP_INTERRUPT_STATUS,
        .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
                                     u32 block_offset, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
        r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

        return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
                                      u32 block_offset, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
        unsigned i;

        /* Enable pflip interrupts */
        for (i = 0; i < adev->mode_info.num_crtc; i++)
                amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
        unsigned i;

        /* Disable pflip interrupts */
        for (i = 0; i < adev->mode_info.num_crtc; i++)
                amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
                               int crtc_id, u64 crtc_base, bool async)
{
        struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
        struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

        /* flip at hsync for async, default is vsync */
        WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
               GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
        /* update pitch */
        WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
               fb->pitches[0] / fb->format->cpp[0]);
        /* update the primary scanout addresses */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
               upper_32_bits(crtc_base));
        /* writing to the low address triggers the update */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(crtc_base));
        /* post the write */
        RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                        u32 *vbl, u32 *position)
{
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
        *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

        return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
                               enum amdgpu_hpd_id hpd)
{
        bool connected = false;

        if (hpd >= adev->mode_info.num_hpd)
                return connected;

        if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
            DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
                connected = true;

        return connected;
}
/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
                                      enum amdgpu_hpd_id hpd)
{
        u32 tmp;
        bool connected = dce_v8_0_hpd_sense(adev, hpd);

        if (hpd >= adev->mode_info.num_hpd)
                return;

        tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
        if (connected)
                tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
        else
                tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
                                 int hpd)
{
        u32 tmp;

        if (hpd >= adev->mode_info.num_hpd) {
                DRM_DEBUG("invalid hpd %d\n", hpd);
                return;
        }

        tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
        tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
                        continue;

                tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
                WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
                    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
                        /* Don't try to enable hpd on eDP or LVDS: it would
                         * break the aux dp channel on iMacs. This helps
                         * (but does not completely fix)
                         * https://bugzilla.redhat.com/show_bug.cgi?id=726143
                         * and also avoids interrupt storms during dpms.
                         */
                        tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                        tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
                        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
                        continue;
                }

                dce_v8_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
                dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
        drm_connector_list_iter_end(&iter);
}
/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        u32 tmp;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
                        continue;

                tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
                WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

                amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
        drm_connector_list_iter_end(&iter);
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
        return mmDC_GPIO_HPD_A;
}
static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
        u32 crtc_hung = 0;
        u32 crtc_status[6];
        u32 i, j, tmp;

        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
                        crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
                        crtc_hung |= (1 << i);
                }
        }

        for (j = 0; j < 10; j++) {
                for (i = 0; i < adev->mode_info.num_crtc; i++) {
                        if (crtc_hung & (1 << i)) {
                                tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
                                if (tmp != crtc_status[i])
                                        crtc_hung &= ~(1 << i);
                        }
                }
                if (crtc_hung == 0)
                        return false;
                udelay(100);
        }

        return true;
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
                                          bool render)
{
        u32 tmp;

        /* Lockout access through VGA aperture */
        tmp = RREG32(mmVGA_HDP_CONTROL);
        if (render)
                tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
        else
                tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
        WREG32(mmVGA_HDP_CONTROL, tmp);

        /* disable VGA render */
        tmp = RREG32(mmVGA_RENDER_CONTROL);
        if (render)
                tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
        else
                tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
        WREG32(mmVGA_RENDER_CONTROL, tmp);
}
static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
{
        int num_crtc = 0;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
                num_crtc = 6;
                break;
        case CHIP_KAVERI:
                num_crtc = 4;
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
                num_crtc = 2;
                break;
        default:
                num_crtc = 0;
        }
        return num_crtc;
}
void dce_v8_0_disable_dce(struct amdgpu_device *adev)
{
        /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
        if (amdgpu_atombios_has_dce_engine_info(adev)) {
                u32 tmp;
                int crtc_enabled, i;

                dce_v8_0_set_vga_render_state(adev, false);

                /* disable crtcs */
                for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
                        crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
                                                     CRTC_CONTROL, CRTC_MASTER_EN);
                        if (crtc_enabled) {
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
                                tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
                                WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        }
                }
        }
}
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
        struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
        int bpc = 0;
        u32 tmp = 0;
        enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

        if (connector) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
                bpc = amdgpu_connector_get_monitor_bpc(connector);
                dither = amdgpu_connector->dither;
        }

        /* LVDS/eDP FMT is set up by atom */
        if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
                return;

        /* not needed for analog */
        if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
            (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
                return;

        if (bpc == 0)
                return;

        switch (bpc) {
        case 6:
                if (dither == AMDGPU_FMT_DITHER_ENABLE)
                        /* XXX sort out optimal dither settings */
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
                                (0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
                else
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
                                (0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
                break;
        case 8:
                if (dither == AMDGPU_FMT_DITHER_ENABLE)
                        /* XXX sort out optimal dither settings */
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
                                (1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
                else
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
                                (1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
                break;
        case 10:
                if (dither == AMDGPU_FMT_DITHER_ENABLE)
                        /* XXX sort out optimal dither settings */
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
                                (2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
                else
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
                                (2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
                break;
        default:
                /* not needed */
                break;
        }

        WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
                                       struct amdgpu_crtc *amdgpu_crtc,
                                       struct drm_display_mode *mode)
{
        u32 tmp, buffer_alloc, i;
        u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
        /*
         * Line Buffer Setup
         * There are 6 line buffers, one for each display controller.
         * There are 3 partitions per LB. Select the number of partitions
         * to enable based on the display width. For display widths larger
         * than 4096, you need to use 2 display controllers and combine
         * them using the stereo blender.
         */
        if (amdgpu_crtc->base.enabled && mode) {
                if (mode->crtc_hdisplay < 1920) {
                        tmp = 1;
                        buffer_alloc = 2;
                } else if (mode->crtc_hdisplay < 2560) {
                        tmp = 2;
                        buffer_alloc = 2;
                } else if (mode->crtc_hdisplay < 4096) {
                        tmp = 0;
                        buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
                } else {
                        DRM_DEBUG_KMS("Mode too big for LB!\n");
                        tmp = 0;
                        buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
                }
        } else {
                tmp = 1;
                buffer_alloc = 0;
        }

        WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
               (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
               (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

        WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
               (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
        for (i = 0; i < adev->usec_timeout; i++) {
                if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
                    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
                        break;
                udelay(1);
        }

        if (amdgpu_crtc->base.enabled && mode) {
                switch (tmp) {
                case 0:
                default:
                        return 4096 * 2;
                case 1:
                        return 1920 * 2;
                case 2:
                        return 2560 * 2;
                }
        }

        /* controller not enabled, so no lb used */
        return 0;
}
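
/*
 * Worked example for dce_v8_0_line_buffer_adjust() above (illustrative
 * values derived only from the code, not measured): a 1920x1080 mode
 * has crtc_hdisplay == 1920, which lands in the "< 2560" bucket, so
 * LB_MEMORY_CONFIG is programmed to 2 and the usable line buffer
 * reported to the watermark code is 2560 * 2 = 5120 pixels; a
 * 1280-wide mode would use config 1 and report 1920 * 2 = 3840 pixels.
 */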

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
        u32 tmp = RREG32(mmMC_SHARED_CHMAP);

        switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
        case 0:
        default:
                return 1;
        case 1:
                return 2;
        case 2:
                return 4;
        case 3:
                return 8;
        case 4:
                return 3;
        case 5:
                return 6;
        case 6:
                return 10;
        case 7:
                return 12;
        case 8:
                return 16;
        }
}
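
/*
 * Decode example (illustrative): a board whose MC_SHARED_CHMAP reports
 * NOOFCHAN == 2 is treated as having 4 DRAM channels; the bandwidth
 * helpers below scale this channel count by 4 in their fixed-point
 * math when sizing the raw memory interface.
 */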

struct dce8_wm_params {
        u32 dram_channels; /* number of dram channels */
        u32 yclk; /* bandwidth per dram data pin in kHz */
        u32 sclk; /* engine clock in kHz */
        u32 disp_clk; /* display clock in kHz */
        u32 src_width; /* viewport width */
        u32 active_time; /* active display time in ns */
        u32 blank_time; /* blank time in ns */
        bool interlaced; /* mode is interlaced */
        fixed20_12 vsc; /* vertical scale ratio */
        u32 num_heads; /* number of active crtcs */
        u32 bytes_per_pixel; /* bytes per pixel display + overlay */
        u32 lb_size; /* line buffer allocated to pipe */
        u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate raw DRAM Bandwidth */
        fixed20_12 dram_efficiency; /* 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        dram_efficiency.full = dfixed_const(7);
        dram_efficiency.full = dfixed_div(dram_efficiency, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

        return dfixed_trunc(bandwidth);
}
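
/*
 * Numerically (illustrative values, not taken from real hardware):
 * with yclk = 1000000 kHz (1 GHz effective) and 4 DRAM channels, the
 * fixed-point math above yields 1000 * (4 * 4) * 0.7 = 11200 MBytes/s
 * of raw DRAM bandwidth.
 */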

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
        /* Calculate DRAM Bandwidth and the part allocated to display. */
        fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
        disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate the display Data return Bandwidth */
        fixed20_12 return_efficiency; /* 0.8 */
        fixed20_12 sclk, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        sclk.full = dfixed_const(wm->sclk);
        sclk.full = dfixed_div(sclk, a);
        a.full = dfixed_const(10);
        return_efficiency.full = dfixed_const(8);
        return_efficiency.full = dfixed_div(return_efficiency, a);
        a.full = dfixed_const(32);
        bandwidth.full = dfixed_mul(a, sclk);
        bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate the DMIF Request Bandwidth */
        fixed20_12 disp_clk_request_efficiency; /* 0.8 */
        fixed20_12 disp_clk, bandwidth;
        fixed20_12 a, b;

        a.full = dfixed_const(1000);
        disp_clk.full = dfixed_const(wm->disp_clk);
        disp_clk.full = dfixed_div(disp_clk, a);
        a.full = dfixed_const(32);
        b.full = dfixed_mul(a, disp_clk);

        a.full = dfixed_const(10);
        disp_clk_request_efficiency.full = dfixed_const(8);
        disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

        bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
        u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
        u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
        u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

        return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
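
/*
 * Example (continuing the illustrative numbers above): with
 * 11200 MBytes/s of raw DRAM bandwidth, sclk = 800000 kHz giving
 * 32 * 800 * 0.8 = 20480 MBytes/s of data return bandwidth, and
 * disp_clk = 600000 kHz giving 32 * 600 * 0.8 = 15360 MBytes/s of
 * DMIF request bandwidth, the available bandwidth is the minimum of
 * the three, i.e. 11200 MBytes/s.
 */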

/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
        /* Calculate the display mode Average Bandwidth
         * DisplayMode should contain the source and destination dimensions,
         * timing, etc.
         */
        fixed20_12 bpp;
        fixed20_12 line_time;
        fixed20_12 src_width;
        fixed20_12 bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        line_time.full = dfixed_const(wm->active_time + wm->blank_time);
        line_time.full = dfixed_div(line_time, a);
        bpp.full = dfixed_const(wm->bytes_per_pixel);
        src_width.full = dfixed_const(wm->src_width);
        bandwidth.full = dfixed_mul(src_width, bpp);
        bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
        bandwidth.full = dfixed_div(bandwidth, line_time);

        return dfixed_trunc(bandwidth);
}
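
/*
 * Example (illustrative): a 1920-wide source at 4 bytes per pixel with
 * vsc = 1.0 and a total line time of 16000 ns averages out to
 * 1920 * 4 bytes / 16 us = 480 MBytes/s for that head.
 */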

/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
        /* First calculate the latency in ns */
        u32 mc_latency = 2000; /* 2000 ns. */
        u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
        u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
        u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
        u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
        u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
                (wm->num_heads * cursor_line_pair_return_time);
        u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
        u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
        u32 tmp, dmif_size = 12288;
        fixed20_12 a, b, c;

        if (wm->num_heads == 0)
                return 0;

        a.full = dfixed_const(2);
        b.full = dfixed_const(1);
        if ((wm->vsc.full > a.full) ||
            ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
            (wm->vtaps >= 5) ||
            ((wm->vsc.full >= a.full) && wm->interlaced))
                max_src_lines_per_dst_line = 4;
        else
                max_src_lines_per_dst_line = 2;

        a.full = dfixed_const(available_bandwidth);
        b.full = dfixed_const(wm->num_heads);
        a.full = dfixed_div(a, b);
        tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
        tmp = min(dfixed_trunc(a), tmp);

        lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

        a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
        b.full = dfixed_const(1000);
        c.full = dfixed_const(lb_fill_bw);
        b.full = dfixed_div(c, b);
        a.full = dfixed_div(a, b);
        line_fill_time = dfixed_trunc(a);

        if (line_fill_time < wm->active_time)
                return latency;
        else
                return latency + (line_fill_time - wm->active_time);
}
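
/*
 * Rough sketch of the result above (illustrative): with one head and
 * ~11200 MBytes/s available, worst_chunk_return_time is
 * 512 * 8 * 1000 / 11200 ~= 365 ns, so the watermark is dominated by
 * the fixed 2000 ns MC latency plus the dc pipe latency; the line
 * fill term only contributes when the line buffer fills more slowly
 * than the active display time.
 */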

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average bandwidth against the dram bandwidth available for display
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
        if (dce_v8_0_average_bandwidth(wm) <=
            (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
                return true;
        else
                return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average bandwidth against the total available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
        if (dce_v8_0_average_bandwidth(wm) <=
            (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
                return true;
        else
                return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
        u32 lb_partitions = wm->lb_size / wm->src_width;
        u32 line_time = wm->active_time + wm->blank_time;
        u32 latency_tolerant_lines;
        u32 latency_hiding;
        fixed20_12 a;

        a.full = dfixed_const(1);
        if (wm->vsc.full > a.full)
                latency_tolerant_lines = 1;
        else {
                if (lb_partitions <= (wm->vtaps + 1))
                        latency_tolerant_lines = 1;
                else
                        latency_tolerant_lines = 2;
        }

        latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

        if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
                return true;
        else
                return false;
}
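
/*
 * Example (illustrative): a 1280-wide source with the 3840-pixel line
 * buffer from dce_v8_0_line_buffer_adjust() gives lb_partitions = 3;
 * with vtaps = 1 and no vertical scaling (vsc <= 1) that exceeds
 * vtaps + 1, so latency_tolerant_lines = 2 and up to two line times
 * plus the blank time are available to hide latency.
 */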

/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
                                        struct amdgpu_crtc *amdgpu_crtc,
                                        u32 lb_size, u32 num_heads)
{
        struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
        struct dce8_wm_params wm_low, wm_high;
        u32 active_time;
        u32 line_time = 0;
        u32 latency_watermark_a = 0, latency_watermark_b = 0;
        u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

        if (amdgpu_crtc->base.enabled && num_heads && mode) {
                active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
                                            (u32)mode->clock);
                line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
                                          (u32)mode->clock);
                line_time = min_t(u32, line_time, 65535);

                /* watermark for high clocks */
                if (adev->pm.dpm_enabled) {
                        wm_high.yclk =
                                amdgpu_dpm_get_mclk(adev, false) * 10;
                        wm_high.sclk =
                                amdgpu_dpm_get_sclk(adev, false) * 10;
                } else {
                        wm_high.yclk = adev->pm.current_mclk * 10;
                        wm_high.sclk = adev->pm.current_sclk * 10;
                }

                wm_high.disp_clk = mode->clock;
                wm_high.src_width = mode->crtc_hdisplay;
                wm_high.active_time = active_time;
                wm_high.blank_time = line_time - wm_high.active_time;
                wm_high.interlaced = false;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        wm_high.interlaced = true;
                wm_high.vsc = amdgpu_crtc->vsc;
                wm_high.vtaps = 1;
                if (amdgpu_crtc->rmx_type != RMX_OFF)
                        wm_high.vtaps = 2;
                wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
                wm_high.lb_size = lb_size;
                wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
                wm_high.num_heads = num_heads;

                /* set for high clocks */
                latency_watermark_a = min_t(u32, dce_v8_0_latency_watermark(&wm_high), 65535);

                /* possibly force display priority to high */
                /* should really do this at mode validation time... */
                if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
                    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
                    !dce_v8_0_check_latency_hiding(&wm_high) ||
                    (adev->mode_info.disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                }

                /* watermark for low clocks */
                if (adev->pm.dpm_enabled) {
                        wm_low.yclk =
                                amdgpu_dpm_get_mclk(adev, true) * 10;
                        wm_low.sclk =
                                amdgpu_dpm_get_sclk(adev, true) * 10;
                } else {
                        wm_low.yclk = adev->pm.current_mclk * 10;
                        wm_low.sclk = adev->pm.current_sclk * 10;
                }

                wm_low.disp_clk = mode->clock;
                wm_low.src_width = mode->crtc_hdisplay;
                wm_low.active_time = active_time;
                wm_low.blank_time = line_time - wm_low.active_time;
                wm_low.interlaced = false;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        wm_low.interlaced = true;
                wm_low.vsc = amdgpu_crtc->vsc;
                wm_low.vtaps = 1;
                if (amdgpu_crtc->rmx_type != RMX_OFF)
                        wm_low.vtaps = 2;
                wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
                wm_low.lb_size = lb_size;
                wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
                wm_low.num_heads = num_heads;

                /* set for low clocks */
                latency_watermark_b = min_t(u32, dce_v8_0_latency_watermark(&wm_low), 65535);

                /* possibly force display priority to high */
                /* should really do this at mode validation time... */
                if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
                    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
                    !dce_v8_0_check_latency_hiding(&wm_low) ||
                    (adev->mode_info.disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                }
                lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
        }

        /* select wm A */
        wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = wm_mask;
        tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
        tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
               ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
                (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
        /* select wm B */
        tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
        tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
        tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
               ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
                (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
        /* restore original selection */
        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

        /* save values for DPM */
        amdgpu_crtc->line_time = line_time;

        /* Save number of lines the linebuffer leads before the scanout */
        amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
        struct drm_display_mode *mode = NULL;
        u32 num_heads = 0, lb_size;
        int i;

        amdgpu_display_update_priority(adev);

        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                if (adev->mode_info.crtcs[i]->base.enabled)
                        num_heads++;
        }
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                mode = &adev->mode_info.crtcs[i]->base.mode;
                lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
                dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
                                            lb_size, num_heads);
        }
}
static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
        int i;
        u32 offset, tmp;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                offset = adev->mode_info.audio.pin[i].offset;
                tmp = RREG32_AUDIO_ENDPT(offset,
                                         ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
                if (((tmp &
                      AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
                     AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
                        adev->mode_info.audio.pin[i].connected = false;
                else
                        adev->mode_info.audio.pin[i].connected = true;
        }
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
        int i;

        dce_v8_0_audio_get_connected_pins(adev);

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                if (adev->mode_info.audio.pin[i].connected)
                        return &adev->mode_info.audio.pin[i];
        }
        DRM_ERROR("No connected audio pins found!\n");
        return NULL;
}
static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = drm_to_adev(encoder->dev);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 offset;

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        offset = dig->afmt->offset;

        WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
               (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
                                                struct drm_display_mode *mode)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct amdgpu_connector *amdgpu_connector = NULL;
        u32 tmp = 0, offset;

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        offset = dig->afmt->pin->offset;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                if (connector->latency_present[1])
                        tmp =
                        (connector->video_latency[1] <<
                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
                        (connector->audio_latency[1] <<
                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
                else
                        tmp =
                        (0 <<
                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
                        (0 <<
                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
        } else {
                if (connector->latency_present[0])
                        tmp =
                        (connector->video_latency[0] <<
                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
                        (connector->audio_latency[0] <<
                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
                else
                        tmp =
                        (0 <<
                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
                        (0 <<
                         AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
        }
        WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct amdgpu_connector *amdgpu_connector = NULL;
        u32 offset, tmp;
        u8 *sadb = NULL;
        int sad_count;

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        offset = dig->afmt->pin->offset;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
        if (sad_count < 0) {
                DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
                sad_count = 0;
        }

        /* program the speaker allocation */
        tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
                 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
        /* set HDMI mode */
        tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
        if (sad_count)
                tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
        else
                tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
        WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

        kfree(sadb);
}
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 offset;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct amdgpu_connector *amdgpu_connector = NULL;
        struct cea_sad *sads;
        int i, sad_count;

        static const u16 eld_reg_to_type[][2] = {
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
        };

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        offset = dig->afmt->pin->offset;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
        if (sad_count < 0)
                DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
        if (sad_count <= 0)
                return;
        BUG_ON(!sads);

        for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
                u32 value = 0;
                u8 stereo_freqs = 0;
                int max_channels = -1;
                int j;

                for (j = 0; j < sad_count; j++) {
                        struct cea_sad *sad = &sads[j];

                        if (sad->format == eld_reg_to_type[i][1]) {
                                if (sad->channels > max_channels) {
                                        value = (sad->channels <<
                                                 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
                                                (sad->byte2 <<
                                                 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
                                                (sad->freq <<
                                                 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
                                        max_channels = sad->channels;
                                }

                                if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
                                        stereo_freqs |= sad->freq;
                                else
                                        break;
                        }
                }

                value |= (stereo_freqs <<
                          AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

                WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
        }

        kfree(sads);
}
static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
                                  struct amdgpu_audio_pin *pin,
                                  bool enable)
{
        if (!pin)
                return;

        WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
                           enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] = {
        AUD0_REGISTER_OFFSET,
        AUD1_REGISTER_OFFSET,
        AUD2_REGISTER_OFFSET,
        AUD3_REGISTER_OFFSET,
        AUD4_REGISTER_OFFSET,
        AUD5_REGISTER_OFFSET,
        AUD6_REGISTER_OFFSET,
};
static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
        int i;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
                adev->mode_info.audio.num_pins = 7;
        else if ((adev->asic_type == CHIP_KABINI) ||
                 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
                adev->mode_info.audio.num_pins = 3;
        else if ((adev->asic_type == CHIP_BONAIRE) ||
                 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
                adev->mode_info.audio.num_pins = 7;
        else
                adev->mode_info.audio.num_pins = 3;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].offset = pin_offsets[i];
                adev->mode_info.audio.pin[i].id = i;
                /* disable audio. it will be set up later */
                /* XXX remove once we switch to ip funcs */
                dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }

        return 0;
}
static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        adev->mode_info.audio.enabled = false;
}
/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        uint32_t offset = dig->afmt->offset;

        WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
        WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

        WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
        WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

        WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
        WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}
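
/*
 * The N/CTS pairs written above implement the HDMI audio clock
 * regeneration relation 128 * audio_sample_rate = pixel_clock * N / CTS.
 * As an example using the HDMI spec defaults (not values computed by
 * this driver): at a 25.2 MHz pixel clock with 48 kHz audio, N = 6144
 * and CTS = 25200, since 25200000 * 6144 / 25200 = 128 * 48000.
 */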

/*
 * build an HDMI Video Info Frame
 */
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
                                               void *buffer, size_t size)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        uint32_t offset = dig->afmt->offset;
        uint8_t *frame = buffer + 3;
        uint8_t *header = buffer;

        WREG32(mmAFMT_AVI_INFO0 + offset,
               frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
        WREG32(mmAFMT_AVI_INFO1 + offset,
               frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
        WREG32(mmAFMT_AVI_INFO2 + offset,
               frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
        WREG32(mmAFMT_AVI_INFO3 + offset,
               frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}
static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
        u32 dto_phase = 24 * 1000;
        u32 dto_modulo = clock;

        if (!dig || !dig->afmt)
                return;

        /* XXX two dtos; generally use dto0 for hdmi */
        /* Express [24MHz / target pixel clock] as an exact rational
         * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
         * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
         */
        WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
        WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
        WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
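
/*
 * Example (illustrative): for a 148500 kHz (148.5 MHz) pixel clock,
 * phase = 24000 and module = 148500 are written, encoding the exact
 * ratio 24 MHz / 148.5 MHz that the audio block uses to derive its
 * reference from the pixel clock.
 */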
1522
1523 /*
1524 * update the info frames with the data from the current display mode
1525 */
dce_v8_0_afmt_setmode(struct drm_encoder * encoder,struct drm_display_mode * mode)1526 static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1527 struct drm_display_mode *mode)
1528 {
1529 struct drm_device *dev = encoder->dev;
1530 struct amdgpu_device *adev = drm_to_adev(dev);
1531 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1532 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1533 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1534 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1535 struct hdmi_avi_infoframe frame;
1536 uint32_t offset, val;
1537 ssize_t err;
1538 int bpc = 8;
1539
1540 if (!dig || !dig->afmt)
1541 return;
1542
1543 /* Silent, r600_hdmi_enable will raise WARN for us */
1544 if (!dig->afmt->enabled)
1545 return;
1546
1547 offset = dig->afmt->offset;
1548
1549 /* hdmi deep color mode general control packets setup, if bpc > 8 */
1550 if (encoder->crtc) {
1551 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1552 bpc = amdgpu_crtc->bpc;
1553 }
1554
1555 /* disable audio prior to setting up hw */
1556 dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1557 dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1558
1559 dce_v8_0_audio_set_dto(encoder, mode->clock);
1560
1561 WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1562 HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1563
1564 WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1565
1566 val = RREG32(mmHDMI_CONTROL + offset);
1567 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1568 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1569
1570 switch (bpc) {
1571 case 0:
1572 case 6:
1573 case 8:
1574 case 16:
1575 default:
1576 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1577 connector->name, bpc);
1578 break;
1579 case 10:
1580 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1581 val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1582 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1583 connector->name);
1584 break;
1585 case 12:
1586 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1587 val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1588 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1589 connector->name);
1590 break;
1591 }
1592
1593 WREG32(mmHDMI_CONTROL + offset, val);
1594
1595 WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1596 HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1597 HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1598 HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1599
1600 WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1601 HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1602 HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1603
1604 WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1605 AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1606
1607 WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1608 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1609
1610 WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1611
1612 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1613 (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1614 (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1615
1616 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1617 AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1618
1619 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1620
1621 if (bpc > 8)
1622 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1623 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1624 else
1625 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1626 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1627 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1628
1629 dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1630
1631 WREG32(mmAFMT_60958_0 + offset,
1632 (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1633
1634 WREG32(mmAFMT_60958_1 + offset,
1635 (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1636
1637 WREG32(mmAFMT_60958_2 + offset,
1638 (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1639 (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1640 (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1641 (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1642 (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1643 (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1644
1645 dce_v8_0_audio_write_speaker_allocation(encoder);
1646
1648 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1649 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1650
1651 dce_v8_0_afmt_audio_select_pin(encoder);
1652 dce_v8_0_audio_write_sad_regs(encoder);
1653 dce_v8_0_audio_write_latency_fields(encoder, mode);
1654
1655 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1656 if (err < 0) {
1657 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1658 return;
1659 }
1660
1661 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1662 if (err < 0) {
1663 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1664 return;
1665 }
1666
1667 dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1668
1669 WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1670 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1671 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for avi info values to be updated */
1672
1673 WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1674 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1675 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1676
1677 WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1678 AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1679
1680 WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1681 WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1682 WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1683 WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1684
1685 /* enable audio after setting up hw */
1686 dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1687 }
1688
1689 static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1690 {
1691 struct drm_device *dev = encoder->dev;
1692 struct amdgpu_device *adev = drm_to_adev(dev);
1693 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1694 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1695
1696 if (!dig || !dig->afmt)
1697 return;
1698
1699 /* Silent, r600_hdmi_enable will raise WARN for us */
1700 if (enable && dig->afmt->enabled)
1701 return;
1702 if (!enable && !dig->afmt->enabled)
1703 return;
1704
1705 if (!enable && dig->afmt->pin) {
1706 dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1707 dig->afmt->pin = NULL;
1708 }
1709
1710 dig->afmt->enabled = enable;
1711
1712 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1713 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1714 }
1715
1716 static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1717 {
1718 int i;
1719
1720 for (i = 0; i < adev->mode_info.num_dig; i++)
1721 adev->mode_info.afmt[i] = NULL;
1722
1723 /* DCE8 has audio blocks tied to DIG encoders */
1724 for (i = 0; i < adev->mode_info.num_dig; i++) {
1725 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1726 if (adev->mode_info.afmt[i]) {
1727 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1728 adev->mode_info.afmt[i]->id = i;
1729 } else {
1730 int j;
1731 for (j = 0; j < i; j++) {
1732 kfree(adev->mode_info.afmt[j]);
1733 adev->mode_info.afmt[j] = NULL;
1734 }
1735 return -ENOMEM;
1736 }
1737 }
1738 return 0;
1739 }
1740
1741 static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1742 {
1743 int i;
1744
1745 for (i = 0; i < adev->mode_info.num_dig; i++) {
1746 kfree(adev->mode_info.afmt[i]);
1747 adev->mode_info.afmt[i] = NULL;
1748 }
1749 }
1750
1751 static const u32 vga_control_regs[6] = {
1752 mmD1VGA_CONTROL,
1753 mmD2VGA_CONTROL,
1754 mmD3VGA_CONTROL,
1755 mmD4VGA_CONTROL,
1756 mmD5VGA_CONTROL,
1757 mmD6VGA_CONTROL,
1758 };
1759
1760 static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1761 {
1762 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1763 struct drm_device *dev = crtc->dev;
1764 struct amdgpu_device *adev = drm_to_adev(dev);
1765 u32 vga_control;
1766
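/* only bit 0 (the VGA mode enable) is toggled; other fields are preserved */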
1767 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1768 if (enable)
1769 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1770 else
1771 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1772 }
1773
1774 static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1775 {
1776 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1777 struct drm_device *dev = crtc->dev;
1778 struct amdgpu_device *adev = drm_to_adev(dev);
1779
1780 if (enable)
1781 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1782 else
1783 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1784 }
1785
1786 static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1787 struct drm_framebuffer *fb,
1788 int x, int y, int atomic)
1789 {
1790 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1791 struct drm_device *dev = crtc->dev;
1792 struct amdgpu_device *adev = drm_to_adev(dev);
1793 struct drm_framebuffer *target_fb;
1794 struct drm_gem_object *obj;
1795 struct amdgpu_bo *abo;
1796 uint64_t fb_location, tiling_flags;
1797 uint32_t fb_format, fb_pitch_pixels;
1798 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1799 u32 pipe_config;
1800 u32 viewport_w, viewport_h;
1801 int r;
1802 bool bypass_lut = false;
1803
1804 /* no fb bound */
1805 if (!atomic && !crtc->primary->fb) {
1806 DRM_DEBUG_KMS("No FB bound\n");
1807 return 0;
1808 }
1809
1810 if (atomic)
1811 target_fb = fb;
1812 else
1813 target_fb = crtc->primary->fb;
1814
1815 /* If atomic, assume fb object is pinned & idle & fenced and
1816 * just update base pointers
1817 */
1818 obj = target_fb->obj[0];
1819 abo = gem_to_amdgpu_bo(obj);
1820 r = amdgpu_bo_reserve(abo, false);
1821 if (unlikely(r != 0))
1822 return r;
1823
1824 if (!atomic) {
1825 abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1826 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1827 if (unlikely(r != 0)) {
1828 amdgpu_bo_unreserve(abo);
1829 return -EINVAL;
1830 }
1831 }
1832 fb_location = amdgpu_bo_gpu_offset(abo);
1833
1834 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1835 amdgpu_bo_unreserve(abo);
1836
1837 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1838
1839 switch (target_fb->format->format) {
1840 case DRM_FORMAT_C8:
1841 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1842 (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1843 break;
1844 case DRM_FORMAT_XRGB4444:
1845 case DRM_FORMAT_ARGB4444:
1846 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1847 (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1848 #ifdef __BIG_ENDIAN
1849 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1850 #endif
1851 break;
1852 case DRM_FORMAT_XRGB1555:
1853 case DRM_FORMAT_ARGB1555:
1854 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1855 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1856 #ifdef __BIG_ENDIAN
1857 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1858 #endif
1859 break;
1860 case DRM_FORMAT_BGRX5551:
1861 case DRM_FORMAT_BGRA5551:
1862 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1863 (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1864 #ifdef __BIG_ENDIAN
1865 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1866 #endif
1867 break;
1868 case DRM_FORMAT_RGB565:
1869 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1870 (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1871 #ifdef __BIG_ENDIAN
1872 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1873 #endif
1874 break;
1875 case DRM_FORMAT_XRGB8888:
1876 case DRM_FORMAT_ARGB8888:
1877 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1878 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1879 #ifdef __BIG_ENDIAN
1880 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1881 #endif
1882 break;
1883 case DRM_FORMAT_XRGB2101010:
1884 case DRM_FORMAT_ARGB2101010:
1885 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1886 (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1887 #ifdef __BIG_ENDIAN
1888 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1889 #endif
1890 /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1891 bypass_lut = true;
1892 break;
1893 case DRM_FORMAT_BGRX1010102:
1894 case DRM_FORMAT_BGRA1010102:
1895 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1896 (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1897 #ifdef __BIG_ENDIAN
1898 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1899 #endif
1900 /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1901 bypass_lut = true;
1902 break;
1903 case DRM_FORMAT_XBGR8888:
1904 case DRM_FORMAT_ABGR8888:
1905 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1906 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1907 fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
1908 (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
1909 #ifdef __BIG_ENDIAN
1910 fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1911 #endif
1912 break;
1913 default:
1914 DRM_ERROR("Unsupported screen format %p4cc\n",
1915 &target_fb->format->format);
1916 return -EINVAL;
1917 }
1918
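/* carry the bo's tiling parameters into GRPH_CONTROL so the display
 * hw can scan out the 1D/2D tiled surface directly
 */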
1919 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1920 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1921
1922 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1923 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1924 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1925 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1926 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1927
1928 fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
1929 fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1930 fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
1931 fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
1932 fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
1933 fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
1934 fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
1935 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1936 fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1937 }
1938
1939 fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
1940
1941 dce_v8_0_vga_enable(crtc, false);
1942
1943 /* Make sure surface address is updated at vertical blank rather than
1944 * horizontal blank
1945 */
1946 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1947
1948 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1949 upper_32_bits(fb_location));
1950 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1951 upper_32_bits(fb_location));
1952 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1953 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1954 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1955 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
1956 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1957 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1958
1959 /*
1960 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
1961 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1962 * retain the full precision throughout the pipeline.
1963 */
1964 WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
1965 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
1966 ~LUT_10BIT_BYPASS_EN);
1967
1968 if (bypass_lut)
1969 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1970
1971 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1972 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1973 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1974 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1975 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1976 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1977
1978 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1979 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1980
1981 dce_v8_0_grph_enable(crtc, true);
1982
1983 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1984 target_fb->height);
1985
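/* align the viewport start down to 4 pixels horizontally and 2 lines
 * vertically, presumably a hw alignment requirement
 */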
1986 x &= ~3;
1987 y &= ~1;
1988 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1989 (x << 16) | y);
1990 viewport_w = crtc->mode.hdisplay;
1991 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1992 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1993 (viewport_w << 16) | viewport_h);
1994
1995 /* set pageflip to happen anywhere in vblank interval */
1996 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1997
1998 if (!atomic && fb && fb != crtc->primary->fb) {
1999 abo = gem_to_amdgpu_bo(fb->obj[0]);
2000 r = amdgpu_bo_reserve(abo, true);
2001 if (unlikely(r != 0))
2002 return r;
2003 amdgpu_bo_unpin(abo);
2004 amdgpu_bo_unreserve(abo);
2005 }
2006
2007 /* Bytes per pixel may have changed */
2008 dce_v8_0_bandwidth_update(adev);
2009
2010 return 0;
2011 }
2012
2013 static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2014 struct drm_display_mode *mode)
2015 {
2016 struct drm_device *dev = crtc->dev;
2017 struct amdgpu_device *adev = drm_to_adev(dev);
2018 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2019
2020 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2021 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2022 LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT);
2023 else
2024 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2025 }
2026
2027 static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2028 {
2029 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2030 struct drm_device *dev = crtc->dev;
2031 struct amdgpu_device *adev = drm_to_adev(dev);
2032 u16 *r, *g, *b;
2033 int i;
2034
2035 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2036
2037 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2038 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2039 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2040 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2041 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2042 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2043 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2044 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2045 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2046 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2047
2048 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2049
2050 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2051 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2052 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2053
2054 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2055 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2056 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2057
2058 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2059 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2060
2061 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2062 r = crtc->gamma_store;
2063 g = r + crtc->gamma_size;
2064 b = g + crtc->gamma_size;
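/* gamma_store entries are 16 bit; keep the top 10 bits of each
 * component and pack them into DC_LUT_30_COLOR as R[29:20], G[19:10],
 * B[9:0]
 */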
2065 for (i = 0; i < 256; i++) {
2066 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2067 ((*r++ & 0xffc0) << 14) |
2068 ((*g++ & 0xffc0) << 4) |
2069 (*b++ >> 6));
2070 }
2071
2072 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2073 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2074 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2075 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2076 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2077 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2078 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2079 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2080 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2081 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2082 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2083 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2084 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2085 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
2086 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2087 /* XXX this only needs to be programmed once per crtc at startup,
2088 * not sure where the best place for it is
2089 */
2090 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2091 ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
2092 }
2093
2094 static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2095 {
2096 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2097 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2098
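/* each UNIPHY block drives two links; link B maps to the odd DIG */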
2099 switch (amdgpu_encoder->encoder_id) {
2100 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2101 if (dig->linkb)
2102 return 1;
2103 else
2104 return 0;
2105 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2106 if (dig->linkb)
2107 return 3;
2108 else
2109 return 2;
2110 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2111 if (dig->linkb)
2112 return 5;
2113 else
2114 return 4;
2115 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2116 return 6;
2117 default:
2118 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2119 return 0;
2120 }
2121 }
2122
2123 /**
2124 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2125 *
2126 * @crtc: drm crtc
2127 *
2128 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
2129 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
2130 * monitors a dedicated PPLL must be used. If a particular board has
2131 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2132 * as there is no need to program the PLL itself. If we are not able to
2133 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2134 * avoid messing up an existing monitor.
2135 *
2136 * Asic specific PLL information
2137 *
2138 * DCE 8.x
2139 * KB/KV
2140 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2141 * CI
2142 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2143 *
2144 */
2145 static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2146 {
2147 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2148 struct drm_device *dev = crtc->dev;
2149 struct amdgpu_device *adev = drm_to_adev(dev);
2150 u32 pll_in_use;
2151 int pll;
2152
2153 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2154 if (adev->clock.dp_extclk)
2155 /* skip PPLL programming if using ext clock */
2156 return ATOM_PPLL_INVALID;
2157 else {
2158 /* use the same PPLL for all DP monitors */
2159 pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2160 if (pll != ATOM_PPLL_INVALID)
2161 return pll;
2162 }
2163 } else {
2164 /* use the same PPLL for all monitors with the same clock */
2165 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2166 if (pll != ATOM_PPLL_INVALID)
2167 return pll;
2168 }
2169 /* otherwise, pick one of the plls */
2170 if ((adev->asic_type == CHIP_KABINI) ||
2171 (adev->asic_type == CHIP_MULLINS)) {
2172 /* KB/ML has PPLL1 and PPLL2 */
2173 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2174 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2175 return ATOM_PPLL2;
2176 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2177 return ATOM_PPLL1;
2178 DRM_ERROR("unable to allocate a PPLL\n");
2179 return ATOM_PPLL_INVALID;
2180 } else {
2181 /* CI/KV has PPLL0, PPLL1, and PPLL2 */
2182 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2183 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2184 return ATOM_PPLL2;
2185 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2186 return ATOM_PPLL1;
2187 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2188 return ATOM_PPLL0;
2189 DRM_ERROR("unable to allocate a PPLL\n");
2190 return ATOM_PPLL_INVALID;
2191 }
2192 return ATOM_PPLL_INVALID;
2193 }
2194
2195 static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2196 {
2197 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2198 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2199 uint32_t cur_lock;
2200
2201 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2202 if (lock)
2203 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2204 else
2205 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2206 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2207 }
2208
2209 static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2210 {
2211 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2212 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2213
2214 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2215 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2216 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2217 }
2218
2219 static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2220 {
2221 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2222 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2223
2224 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2225 upper_32_bits(amdgpu_crtc->cursor_addr));
2226 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2227 lower_32_bits(amdgpu_crtc->cursor_addr));
2228
2229 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2230 CUR_CONTROL__CURSOR_EN_MASK |
2231 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2232 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2233 }
2234
2235 static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2236 int x, int y)
2237 {
2238 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2239 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2240 int xorigin = 0, yorigin = 0;
2241
2242 amdgpu_crtc->cursor_x = x;
2243 amdgpu_crtc->cursor_y = y;
2244
2245 /* avivo cursors are offset into the total surface */
2246 x += crtc->x;
2247 y += crtc->y;
2248 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2249
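/* a negative position means the cursor hangs off the top/left edge:
 * clamp the position to 0 and move the hotspot instead
 */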
2250 if (x < 0) {
2251 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2252 x = 0;
2253 }
2254 if (y < 0) {
2255 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2256 y = 0;
2257 }
2258
2259 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2260 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2261 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2262 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2263
2264 return 0;
2265 }
2266
2267 static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2268 int x, int y)
2269 {
2270 int ret;
2271
2272 dce_v8_0_lock_cursor(crtc, true);
2273 ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2274 dce_v8_0_lock_cursor(crtc, false);
2275
2276 return ret;
2277 }
2278
2279 static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2280 struct drm_file *file_priv,
2281 uint32_t handle,
2282 uint32_t width,
2283 uint32_t height,
2284 int32_t hot_x,
2285 int32_t hot_y)
2286 {
2287 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2288 struct drm_gem_object *obj;
2289 struct amdgpu_bo *aobj;
2290 int ret;
2291
2292 if (!handle) {
2293 /* turn off cursor */
2294 dce_v8_0_hide_cursor(crtc);
2295 obj = NULL;
2296 goto unpin;
2297 }
2298
2299 if ((width > amdgpu_crtc->max_cursor_width) ||
2300 (height > amdgpu_crtc->max_cursor_height)) {
2301 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2302 return -EINVAL;
2303 }
2304
2305 obj = drm_gem_object_lookup(file_priv, handle);
2306 if (!obj) {
2307 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2308 return -ENOENT;
2309 }
2310
2311 aobj = gem_to_amdgpu_bo(obj);
2312 ret = amdgpu_bo_reserve(aobj, false);
2313 if (ret != 0) {
2314 drm_gem_object_put(obj);
2315 return ret;
2316 }
2317
2318 aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2319 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2320 amdgpu_bo_unreserve(aobj);
2321 if (ret) {
2322 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2323 drm_gem_object_put(obj);
2324 return ret;
2325 }
2326 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2327
2328 dce_v8_0_lock_cursor(crtc, true);
2329
2330 if (width != amdgpu_crtc->cursor_width ||
2331 height != amdgpu_crtc->cursor_height ||
2332 hot_x != amdgpu_crtc->cursor_hot_x ||
2333 hot_y != amdgpu_crtc->cursor_hot_y) {
2334 int x, y;
2335
2336 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2337 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2338
2339 dce_v8_0_cursor_move_locked(crtc, x, y);
2340
2341 amdgpu_crtc->cursor_width = width;
2342 amdgpu_crtc->cursor_height = height;
2343 amdgpu_crtc->cursor_hot_x = hot_x;
2344 amdgpu_crtc->cursor_hot_y = hot_y;
2345 }
2346
2347 dce_v8_0_show_cursor(crtc);
2348 dce_v8_0_lock_cursor(crtc, false);
2349
2350 unpin:
2351 if (amdgpu_crtc->cursor_bo) {
2352 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2353 ret = amdgpu_bo_reserve(aobj, true);
2354 if (likely(ret == 0)) {
2355 amdgpu_bo_unpin(aobj);
2356 amdgpu_bo_unreserve(aobj);
2357 }
2358 drm_gem_object_put(amdgpu_crtc->cursor_bo);
2359 }
2360
2361 amdgpu_crtc->cursor_bo = obj;
2362 return 0;
2363 }
2364
2365 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2366 {
2367 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2368
2369 if (amdgpu_crtc->cursor_bo) {
2370 dce_v8_0_lock_cursor(crtc, true);
2371
2372 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2373 amdgpu_crtc->cursor_y);
2374
2375 dce_v8_0_show_cursor(crtc);
2376
2377 dce_v8_0_lock_cursor(crtc, false);
2378 }
2379 }
2380
2381 static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2382 u16 *blue, uint32_t size,
2383 struct drm_modeset_acquire_ctx *ctx)
2384 {
2385 dce_v8_0_crtc_load_lut(crtc);
2386
2387 return 0;
2388 }
2389
2390 static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2391 {
2392 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2393
2394 drm_crtc_cleanup(crtc);
2395 kfree(amdgpu_crtc);
2396 }
2397
2398 static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2399 .cursor_set2 = dce_v8_0_crtc_cursor_set2,
2400 .cursor_move = dce_v8_0_crtc_cursor_move,
2401 .gamma_set = dce_v8_0_crtc_gamma_set,
2402 .set_config = amdgpu_display_crtc_set_config,
2403 .destroy = dce_v8_0_crtc_destroy,
2404 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2405 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
2406 .enable_vblank = amdgpu_enable_vblank_kms,
2407 .disable_vblank = amdgpu_disable_vblank_kms,
2408 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2409 };
2410
2411 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2412 {
2413 struct drm_device *dev = crtc->dev;
2414 struct amdgpu_device *adev = drm_to_adev(dev);
2415 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2416 unsigned type;
2417
2418 switch (mode) {
2419 case DRM_MODE_DPMS_ON:
2420 amdgpu_crtc->enabled = true;
2421 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2422 dce_v8_0_vga_enable(crtc, true);
2423 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2424 dce_v8_0_vga_enable(crtc, false);
2425 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2426 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2427 amdgpu_crtc->crtc_id);
2428 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2429 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2430 drm_crtc_vblank_on(crtc);
2431 dce_v8_0_crtc_load_lut(crtc);
2432 break;
2433 case DRM_MODE_DPMS_STANDBY:
2434 case DRM_MODE_DPMS_SUSPEND:
2435 case DRM_MODE_DPMS_OFF:
2436 drm_crtc_vblank_off(crtc);
2437 if (amdgpu_crtc->enabled) {
2438 dce_v8_0_vga_enable(crtc, true);
2439 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2440 dce_v8_0_vga_enable(crtc, false);
2441 }
2442 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2443 amdgpu_crtc->enabled = false;
2444 break;
2445 }
2446 /* adjust pm to dpms */
2447 amdgpu_dpm_compute_clocks(adev);
2448 }
2449
2450 static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2451 {
2452 /* disable crtc pair power gating before programming */
2453 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2454 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2455 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2456 }
2457
2458 static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2459 {
2460 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2461 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2462 }
2463
2464 static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2465 {
2466 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2467 struct drm_device *dev = crtc->dev;
2468 struct amdgpu_device *adev = drm_to_adev(dev);
2469 struct amdgpu_atom_ss ss;
2470 int i;
2471
2472 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2473 if (crtc->primary->fb) {
2474 int r;
2475 struct amdgpu_bo *abo;
2476
2477 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2478 r = amdgpu_bo_reserve(abo, true);
2479 if (unlikely(r))
2480 DRM_ERROR("failed to reserve abo before unpin\n");
2481 else {
2482 amdgpu_bo_unpin(abo);
2483 amdgpu_bo_unreserve(abo);
2484 }
2485 }
2486 /* disable the GRPH */
2487 dce_v8_0_grph_enable(crtc, false);
2488
2489 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2490
2491 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2492 if (adev->mode_info.crtcs[i] &&
2493 adev->mode_info.crtcs[i]->enabled &&
2494 i != amdgpu_crtc->crtc_id &&
2495 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2496 /* another crtc is using this pll; don't turn
2497 * off the pll
2498 */
2499 goto done;
2500 }
2501 }
2502
2503 switch (amdgpu_crtc->pll_id) {
2504 case ATOM_PPLL1:
2505 case ATOM_PPLL2:
2506 /* disable the ppll */
2507 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2508 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2509 break;
2510 case ATOM_PPLL0:
2511 /* disable the ppll */
2512 if ((adev->asic_type == CHIP_KAVERI) ||
2513 (adev->asic_type == CHIP_BONAIRE) ||
2514 (adev->asic_type == CHIP_HAWAII))
2515 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2516 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2517 break;
2518 default:
2519 break;
2520 }
2521 done:
2522 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2523 amdgpu_crtc->adjusted_clock = 0;
2524 amdgpu_crtc->encoder = NULL;
2525 amdgpu_crtc->connector = NULL;
2526 }
2527
2528 static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2529 struct drm_display_mode *mode,
2530 struct drm_display_mode *adjusted_mode,
2531 int x, int y, struct drm_framebuffer *old_fb)
2532 {
2533 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2534
2535 if (!amdgpu_crtc->adjusted_clock)
2536 return -EINVAL;
2537
2538 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2539 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2540 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2541 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2542 amdgpu_atombios_crtc_scaler_setup(crtc);
2543 dce_v8_0_cursor_reset(crtc);
2544 /* update the hw version for dpm */
2545 amdgpu_crtc->hw_mode = *adjusted_mode;
2546
2547 return 0;
2548 }
2549
2550 static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2551 const struct drm_display_mode *mode,
2552 struct drm_display_mode *adjusted_mode)
2553 {
2554 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2555 struct drm_device *dev = crtc->dev;
2556 struct drm_encoder *encoder;
2557
2558 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2559 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2560 if (encoder->crtc == crtc) {
2561 amdgpu_crtc->encoder = encoder;
2562 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2563 break;
2564 }
2565 }
2566 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2567 amdgpu_crtc->encoder = NULL;
2568 amdgpu_crtc->connector = NULL;
2569 return false;
2570 }
2571 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2572 return false;
2573 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2574 return false;
2575 /* pick pll */
2576 amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2577 /* if we can't get a PPLL for a non-DP encoder, fail */
2578 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2579 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2580 return false;
2581
2582 return true;
2583 }
2584
2585 static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2586 struct drm_framebuffer *old_fb)
2587 {
2588 return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2589 }
2590
2591 static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2592 struct drm_framebuffer *fb,
2593 int x, int y, enum mode_set_atomic state)
2594 {
2595 return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2596 }
2597
2598 static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2599 .dpms = dce_v8_0_crtc_dpms,
2600 .mode_fixup = dce_v8_0_crtc_mode_fixup,
2601 .mode_set = dce_v8_0_crtc_mode_set,
2602 .mode_set_base = dce_v8_0_crtc_set_base,
2603 .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2604 .prepare = dce_v8_0_crtc_prepare,
2605 .commit = dce_v8_0_crtc_commit,
2606 .disable = dce_v8_0_crtc_disable,
2607 .get_scanout_position = amdgpu_crtc_get_scanout_position,
2608 };
2609
2610 static void dce_v8_0_panic_flush(struct drm_plane *plane)
2611 {
2612 struct drm_framebuffer *fb;
2613 struct amdgpu_crtc *amdgpu_crtc;
2614 struct amdgpu_device *adev;
2615 uint32_t fb_format;
2616
2617 if (!plane->fb)
2618 return;
2619
2620 fb = plane->fb;
2621 amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
2622 adev = drm_to_adev(fb->dev);
2623
2624 /* Disable DC tiling */
2625 fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
2626 fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
2627 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2628 }
2629
2630 static const struct drm_plane_helper_funcs dce_v8_0_drm_primary_plane_helper_funcs = {
2631 .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
2632 .panic_flush = dce_v8_0_panic_flush,
2633 };
2634
2635 static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2636 {
2637 struct amdgpu_crtc *amdgpu_crtc;
2638
2639 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2640 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2641 if (amdgpu_crtc == NULL)
2642 return -ENOMEM;
2643
2644 drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2645
2646 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2647 amdgpu_crtc->crtc_id = index;
2648 adev->mode_info.crtcs[index] = amdgpu_crtc;
2649
2650 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2651 amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2652 adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2653 adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2654
2655 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2656
2657 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2658 amdgpu_crtc->adjusted_clock = 0;
2659 amdgpu_crtc->encoder = NULL;
2660 amdgpu_crtc->connector = NULL;
2661 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2662 drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v8_0_drm_primary_plane_helper_funcs);
2663
2664 return 0;
2665 }
2666
2667 static int dce_v8_0_early_init(struct amdgpu_ip_block *ip_block)
2668 {
2669 struct amdgpu_device *adev = ip_block->adev;
2670
2671 adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2672 adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2673
2674 dce_v8_0_set_display_funcs(adev);
2675
2676 adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2677
2678 switch (adev->asic_type) {
2679 case CHIP_BONAIRE:
2680 case CHIP_HAWAII:
2681 adev->mode_info.num_hpd = 6;
2682 adev->mode_info.num_dig = 6;
2683 break;
2684 case CHIP_KAVERI:
2685 adev->mode_info.num_hpd = 6;
2686 adev->mode_info.num_dig = 7;
2687 break;
2688 case CHIP_KABINI:
2689 case CHIP_MULLINS:
2690 adev->mode_info.num_hpd = 6;
2691 adev->mode_info.num_dig = 6; /* ? */
2692 break;
2693 default:
2694 /* FIXME: not supported yet */
2695 return -EINVAL;
2696 }
2697
2698 dce_v8_0_set_irq_funcs(adev);
2699
2700 return 0;
2701 }
2702
2703 static int dce_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
2704 {
2705 int r, i;
2706 struct amdgpu_device *adev = ip_block->adev;
2707
2708 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2709 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2710 if (r)
2711 return r;
2712 }
2713
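/* pageflip irqs use legacy src_ids 8, 10, ..., 18, one per crtc
 * (decoded back to a crtc index in dce_v8_0_pageflip_irq)
 */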
2714 for (i = 8; i < 20; i += 2) {
2715 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2716 if (r)
2717 return r;
2718 }
2719
2720 /* HPD hotplug */
2721 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2722 if (r)
2723 return r;
2724
2725 adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2726
2727 adev_to_drm(adev)->mode_config.async_page_flip = true;
2728
2729 adev_to_drm(adev)->mode_config.max_width = 16384;
2730 adev_to_drm(adev)->mode_config.max_height = 16384;
2731
2732 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2733 if (adev->asic_type == CHIP_HAWAII)
2734 /* disable prefer shadow for now due to hibernation issues */
2735 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
2736 else
2737 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2738
2739 adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2740
2741 r = amdgpu_display_modeset_create_props(adev);
2742 if (r)
2743 return r;
2744
2748 /* allocate crtcs */
2749 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2750 r = dce_v8_0_crtc_init(adev, i);
2751 if (r)
2752 return r;
2753 }
2754
2755 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2756 amdgpu_display_print_display_setup(adev_to_drm(adev));
2757 else
2758 return -EINVAL;
2759
2760 /* setup afmt */
2761 r = dce_v8_0_afmt_init(adev);
2762 if (r)
2763 return r;
2764
2765 r = dce_v8_0_audio_init(adev);
2766 if (r)
2767 return r;
2768
2769 /* Disable vblank IRQs aggressively for power-saving */
2770 /* XXX: can this be enabled for DC? */
2771 adev_to_drm(adev)->vblank_disable_immediate = true;
2772
2773 r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2774 if (r)
2775 return r;
2776
2777 /* Pre-DCE11 */
2778 INIT_DELAYED_WORK(&adev->hotplug_work,
2779 amdgpu_display_hotplug_work_func);
2780
2781 drm_kms_helper_poll_init(adev_to_drm(adev));
2782
2783 adev->mode_info.mode_config_initialized = true;
2784 return 0;
2785 }
2786
2787 static int dce_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
2788 {
2789 struct amdgpu_device *adev = ip_block->adev;
2790
2791 drm_edid_free(adev->mode_info.bios_hardcoded_edid);
2792
2793 drm_kms_helper_poll_fini(adev_to_drm(adev));
2794
2795 dce_v8_0_audio_fini(adev);
2796
2797 dce_v8_0_afmt_fini(adev);
2798
2799 drm_mode_config_cleanup(adev_to_drm(adev));
2800 adev->mode_info.mode_config_initialized = false;
2801
2802 return 0;
2803 }
2804
2805 static int dce_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
2806 {
2807 int i;
2808 struct amdgpu_device *adev = ip_block->adev;
2809
2810 /* disable vga render */
2811 dce_v8_0_set_vga_render_state(adev, false);
2812 /* init dig PHYs, disp eng pll */
2813 amdgpu_atombios_encoder_init_dig(adev);
2814 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2815
2816 /* initialize hpd */
2817 dce_v8_0_hpd_init(adev);
2818
2819 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2820 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2821 }
2822
2823 dce_v8_0_pageflip_interrupt_init(adev);
2824
2825 return 0;
2826 }
2827
2828 static int dce_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
2829 {
2830 int i;
2831 struct amdgpu_device *adev = ip_block->adev;
2832
2833 dce_v8_0_hpd_fini(adev);
2834
2835 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2836 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2837 }
2838
2839 dce_v8_0_pageflip_interrupt_fini(adev);
2840
2841 flush_delayed_work(&adev->hotplug_work);
2842
2843 return 0;
2844 }
2845
2846 static int dce_v8_0_suspend(struct amdgpu_ip_block *ip_block)
2847 {
2848 struct amdgpu_device *adev = ip_block->adev;
2849 int r;
2850
2851 r = amdgpu_display_suspend_helper(adev);
2852 if (r)
2853 return r;
2854
2855 adev->mode_info.bl_level =
2856 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2857
2858 return dce_v8_0_hw_fini(ip_block);
2859 }
2860
2861 static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block)
2862 {
2863 struct amdgpu_device *adev = ip_block->adev;
2864 int ret;
2865
2866 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2867 adev->mode_info.bl_level);
2868
2869 ret = dce_v8_0_hw_init(ip_block);
2870
2871 /* turn on the BL */
2872 if (adev->mode_info.bl_encoder) {
2873 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2874 adev->mode_info.bl_encoder);
2875 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2876 bl_level);
2877 }
2878 if (ret)
2879 return ret;
2880
2881 return amdgpu_display_resume_helper(adev);
2882 }
2883
2884 static bool dce_v8_0_is_idle(struct amdgpu_ip_block *ip_block)
2885 {
2886 return true;
2887 }
2888
2889 static int dce_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
2890 {
2891 u32 srbm_soft_reset = 0, tmp;
2892 struct amdgpu_device *adev = ip_block->adev;
2893
2894 if (dce_v8_0_is_display_hung(adev))
2895 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2896
2897 if (srbm_soft_reset) {
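/* assert the DC soft reset, wait for it to take effect, then
 * release it; the register read-backs post each write
 */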
2898 tmp = RREG32(mmSRBM_SOFT_RESET);
2899 tmp |= srbm_soft_reset;
2900 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2901 WREG32(mmSRBM_SOFT_RESET, tmp);
2902 tmp = RREG32(mmSRBM_SOFT_RESET);
2903
2904 udelay(50);
2905
2906 tmp &= ~srbm_soft_reset;
2907 WREG32(mmSRBM_SOFT_RESET, tmp);
2908 tmp = RREG32(mmSRBM_SOFT_RESET);
2909
2910 /* Wait a little for things to settle down */
2911 udelay(50);
2912 }
2913 return 0;
2914 }
2915
2916 static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2917 int crtc,
2918 enum amdgpu_interrupt_state state)
2919 {
2920 u32 reg_block, lb_interrupt_mask;
2921
2922 if (crtc >= adev->mode_info.num_crtc) {
2923 DRM_DEBUG("invalid crtc %d\n", crtc);
2924 return;
2925 }
2926
2927 switch (crtc) {
2928 case 0:
2929 reg_block = CRTC0_REGISTER_OFFSET;
2930 break;
2931 case 1:
2932 reg_block = CRTC1_REGISTER_OFFSET;
2933 break;
2934 case 2:
2935 reg_block = CRTC2_REGISTER_OFFSET;
2936 break;
2937 case 3:
2938 reg_block = CRTC3_REGISTER_OFFSET;
2939 break;
2940 case 4:
2941 reg_block = CRTC4_REGISTER_OFFSET;
2942 break;
2943 case 5:
2944 reg_block = CRTC5_REGISTER_OFFSET;
2945 break;
2946 default:
2947 DRM_DEBUG("invalid crtc %d\n", crtc);
2948 return;
2949 }
2950
2951 switch (state) {
2952 case AMDGPU_IRQ_STATE_DISABLE:
2953 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2954 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2955 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2956 break;
2957 case AMDGPU_IRQ_STATE_ENABLE:
2958 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2959 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2960 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2961 break;
2962 default:
2963 break;
2964 }
2965 }
2966
2967 static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2968 int crtc,
2969 enum amdgpu_interrupt_state state)
2970 {
2971 u32 reg_block, lb_interrupt_mask;
2972
2973 if (crtc >= adev->mode_info.num_crtc) {
2974 DRM_DEBUG("invalid crtc %d\n", crtc);
2975 return;
2976 }
2977
2978 switch (crtc) {
2979 case 0:
2980 reg_block = CRTC0_REGISTER_OFFSET;
2981 break;
2982 case 1:
2983 reg_block = CRTC1_REGISTER_OFFSET;
2984 break;
2985 case 2:
2986 reg_block = CRTC2_REGISTER_OFFSET;
2987 break;
2988 case 3:
2989 reg_block = CRTC3_REGISTER_OFFSET;
2990 break;
2991 case 4:
2992 reg_block = CRTC4_REGISTER_OFFSET;
2993 break;
2994 case 5:
2995 reg_block = CRTC5_REGISTER_OFFSET;
2996 break;
2997 default:
2998 DRM_DEBUG("invalid crtc %d\n", crtc);
2999 return;
3000 }
3001
3002 switch (state) {
3003 case AMDGPU_IRQ_STATE_DISABLE:
3004 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3005 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3006 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3007 break;
3008 case AMDGPU_IRQ_STATE_ENABLE:
3009 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
3010 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
3011 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
3012 break;
3013 default:
3014 break;
3015 }
3016 }
3017
3018 static int dce_v8_0_set_hpd_irq_state(struct amdgpu_device *adev,
3019 struct amdgpu_irq_src *src,
3020 unsigned type,
3021 enum amdgpu_interrupt_state state)
3022 {
3023 u32 dc_hpd_int_cntl;
3024
3025 if (type >= adev->mode_info.num_hpd) {
3026 DRM_DEBUG("invalid hpd %d\n", type);
3027 return 0;
3028 }
3029
3030 switch (state) {
3031 case AMDGPU_IRQ_STATE_DISABLE:
3032 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3033 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3034 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3035 break;
3036 case AMDGPU_IRQ_STATE_ENABLE:
3037 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3038 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3039 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3040 break;
3041 default:
3042 break;
3043 }
3044
3045 return 0;
3046 }
3047
3048 static int dce_v8_0_set_crtc_irq_state(struct amdgpu_device *adev,
3049 struct amdgpu_irq_src *src,
3050 unsigned type,
3051 enum amdgpu_interrupt_state state)
3052 {
3053 switch (type) {
3054 case AMDGPU_CRTC_IRQ_VBLANK1:
3055 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3056 break;
3057 case AMDGPU_CRTC_IRQ_VBLANK2:
3058 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3059 break;
3060 case AMDGPU_CRTC_IRQ_VBLANK3:
3061 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3062 break;
3063 case AMDGPU_CRTC_IRQ_VBLANK4:
3064 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3065 break;
3066 case AMDGPU_CRTC_IRQ_VBLANK5:
3067 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3068 break;
3069 case AMDGPU_CRTC_IRQ_VBLANK6:
3070 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3071 break;
3072 case AMDGPU_CRTC_IRQ_VLINE1:
3073 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3074 break;
3075 case AMDGPU_CRTC_IRQ_VLINE2:
3076 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3077 break;
3078 case AMDGPU_CRTC_IRQ_VLINE3:
3079 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3080 break;
3081 case AMDGPU_CRTC_IRQ_VLINE4:
3082 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3083 break;
3084 case AMDGPU_CRTC_IRQ_VLINE5:
3085 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3086 break;
3087 case AMDGPU_CRTC_IRQ_VLINE6:
3088 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
3089 break;
3090 default:
3091 break;
3092 }
3093 return 0;
3094 }
3095
3096 static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3097 struct amdgpu_irq_src *source,
3098 struct amdgpu_iv_entry *entry)
3099 {
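/* crtc irqs were registered with src_id == crtc index + 1 in sw_init */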
3100 unsigned crtc = entry->src_id - 1;
3101 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3102 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3103 crtc);
3104
3105 switch (entry->src_data[0]) {
3106 case 0: /* vblank */
3107 if (disp_int & interrupt_status_offsets[crtc].vblank)
3108 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3109 else
3110 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3111
3112 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3113 drm_handle_vblank(adev_to_drm(adev), crtc);
3114 }
3115 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3116 break;
3117 case 1: /* vline */
3118 if (disp_int & interrupt_status_offsets[crtc].vline)
3119 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3120 else
3121 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3122
3123 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3124 break;
3125 default:
3126 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3127 break;
3128 }
3129
3130 return 0;
3131 }
3132
3133 static int dce_v8_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3134 struct amdgpu_irq_src *src,
3135 unsigned type,
3136 enum amdgpu_interrupt_state state)
3137 {
3138 u32 reg;
3139
3140 if (type >= adev->mode_info.num_crtc) {
3141 DRM_ERROR("invalid pageflip crtc %d\n", type);
3142 return -EINVAL;
3143 }
3144
3145 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3146 if (state == AMDGPU_IRQ_STATE_DISABLE)
3147 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3148 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3149 else
3150 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3151 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3152
3153 return 0;
3154 }
3155
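/*
 * dce_v8_0_pageflip_irq - process a page flip completion interrupt
 *
 * Page-flip src_ids start at 8 and advance by two per CRTC, hence the
 * (src_id - 8) >> 1 decode. Clears the pending GRPH_PFLIP status, then
 * completes any submitted flip: sends the vblank event to userspace,
 * drops the vblank reference, and schedules the buffer unpin work.
 */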
static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;

	/* validate the CRTC index before using it to index mode_info.crtcs */
	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* the IRQ can fire during early init, before the CRTC is set up */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return 0;
	}

	/* page flip completed, clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

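/*
 * dce_v8_0_hpd_irq - process a hotplug detect interrupt
 *
 * Validates the HPD pin from the IV entry, acknowledges the interrupt,
 * and kicks off the deferred hotplug work to re-probe connectors.
 */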
static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask;
	unsigned hpd;

	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
		return 0;
	}

	hpd = entry->src_data[0];
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		dce_v8_0_hpd_int_ack(adev, hpd);
		schedule_delayed_work(&adev->hotplug_work, 0);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

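/*
 * DCE 8.x does no clock or power gating here; the IP block interface
 * expects these callbacks, so they are provided as no-ops.
 */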
static int dce_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
	.name = "dce_v8_0",
	.early_init = dce_v8_0_early_init,
	.sw_init = dce_v8_0_sw_init,
	.sw_fini = dce_v8_0_sw_fini,
	.hw_init = dce_v8_0_hw_init,
	.hw_fini = dce_v8_0_hw_fini,
	.suspend = dce_v8_0_suspend,
	.resume = dce_v8_0_resume,
	.is_idle = dce_v8_0_is_idle,
	.soft_reset = dce_v8_0_soft_reset,
	.set_clockgating_state = dce_v8_0_set_clockgating_state,
	.set_powergating_state = dce_v8_0_set_powergating_state,
};

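/*
 * dce_v8_0_encoder_mode_set - set up an encoder for the new mode
 *
 * Caches the adjusted pixel clock, powers the encoder down while it is
 * reprogrammed, restores the interleave setting (which the scaler setup
 * can clear), and enables/configures the AFMT block for HDMI.
 */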
static void
dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* setting up the scaler clears this on some chips */
	dce_v8_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v8_0_afmt_enable(encoder, true);
		dce_v8_0_afmt_setmode(encoder, adjusted_mode);
	}
}

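/*
 * dce_v8_0_encoder_prepare - prepare an encoder before a mode set
 *
 * Picks a DIG encoder (and an AFMT block for DFPs) for digital paths,
 * locks the ATOM scratch registers, routes the connector's i2c port if
 * needed, powers up eDP panels, assigns the CRTC source, and programs
 * the FMT block.
 */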
static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

		if (dig) {
			dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v8_0_program_fmt(encoder);
}

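/*
 * dce_v8_0_encoder_commit - turn the encoder back on after a mode set
 * and release the ATOM scratch register lock taken in prepare().
 */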
static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

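/*
 * dce_v8_0_encoder_disable - fully disable an encoder
 *
 * Powers the encoder off, tears down HDMI AFMT if active, releases the
 * DIG encoder assignment, and clears the active device mask.
 */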
static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v8_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
{
}

static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
{
}

static void
dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
}

static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
{
}

static void
dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}

static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
	.dpms = dce_v8_0_ext_dpms,
	.prepare = dce_v8_0_ext_prepare,
	.mode_set = dce_v8_0_ext_mode_set,
	.commit = dce_v8_0_ext_commit,
	.disable = dce_v8_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.disable = dce_v8_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v8_0_encoder_prepare,
	.mode_set = dce_v8_0_encoder_mode_set,
	.commit = dce_v8_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

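/* tear down an encoder: backlight state (LCDs only), private data, DRM state */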
static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
	.destroy = dce_v8_0_encoder_destroy,
};

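/*
 * dce_v8_0_encoder_add - register an encoder parsed from the BIOS tables
 *
 * If an encoder with this enum was already added, just OR in the newly
 * supported devices; otherwise allocate one and bind the encoder and
 * helper funcs matching its object ID (DAC, DIG, or external bridge).
 */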
static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
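	/* possible_crtcs is a bitmask: bit N set allows CRTC N to drive this encoder */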
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
		break;
	}
}

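/* hooks the common amdgpu display code calls through adev->mode_info.funcs */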
static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
	.bandwidth_update = &dce_v8_0_bandwidth_update,
	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v8_0_hpd_sense,
	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
	.page_flip = &dce_v8_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v8_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
};

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
{
	adev->mode_info.funcs = &dce_v8_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
	.set = dce_v8_0_set_crtc_irq_state,
	.process = dce_v8_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
	.set = dce_v8_0_set_pageflip_irq_state,
	.process = dce_v8_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
	.set = dce_v8_0_set_hpd_irq_state,
	.process = dce_v8_0_hpd_irq,
};

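/*
 * The CRTC interrupt source exposes one vblank and one vline type per
 * CRTC, with the vline types enumerated after all six vblank slots, so
 * the type count is the vline base (AMDGPU_CRTC_IRQ_VLINE1) plus the
 * number of CRTCs actually present.
 */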
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.num_crtc > 0)
		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
	else
		adev->crtc_irq.num_types = 0;
	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}

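/*
 * All DCE 8.x minor revisions share the same implementation; the
 * separate version structs only let each ASIC report its exact DCE
 * revision.
 */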
const struct amdgpu_ip_block_version dce_v8_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 2,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 3,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_5_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};